
TOMOYO Linux Cross Reference
Linux/include/linux/mmap_lock.h


Diff markup

Differences between /include/linux/mmap_lock.h (Version linux-6.11.5) and /include/linux/mmap_lock.h (Version linux-6.4.16)


--- include/linux/mmap_lock.h	(linux-6.4.16)
+++ include/linux/mmap_lock.h	(linux-6.11.5)
@@ -1,190 +1,186 @@
 #ifndef _LINUX_MMAP_LOCK_H
 #define _LINUX_MMAP_LOCK_H
 
 #include <linux/lockdep.h>
 #include <linux/mm_types.h>
 #include <linux/mmdebug.h>
 #include <linux/rwsem.h>
 #include <linux/tracepoint-defs.h>
 #include <linux/types.h>
 
 #define MMAP_LOCK_INITIALIZER(name) \
         .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
 
 DECLARE_TRACEPOINT(mmap_lock_start_locking);
 DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
 DECLARE_TRACEPOINT(mmap_lock_released);
 
 #ifdef CONFIG_TRACING
 
 void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
 void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
                                            bool success);
 void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);
 
 static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
                                                    bool write)
 {
         if (tracepoint_enabled(mmap_lock_start_locking))
                 __mmap_lock_do_trace_start_locking(mm, write);
 }
 
 static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
                                                       bool write, bool success)
 {
         if (tracepoint_enabled(mmap_lock_acquire_returned))
                 __mmap_lock_do_trace_acquire_returned(mm, write, success);
 }
 
 static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
 {
         if (tracepoint_enabled(mmap_lock_released))
                 __mmap_lock_do_trace_released(mm, write);
 }
 
 #else /* !CONFIG_TRACING */
 
 static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
                                                    bool write)
 {
 }
 
 static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
                                                       bool write, bool success)
 {
 }
 
 static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
 {
 }
 
 #endif /* CONFIG_TRACING */
 
-static inline void mmap_assert_locked(struct mm_struct *mm)
+static inline void mmap_assert_locked(const struct mm_struct *mm)
 {
-        lockdep_assert_held(&mm->mmap_lock);
-        VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+        rwsem_assert_held(&mm->mmap_lock);
 }
 
-static inline void mmap_assert_write_locked(struct mm_struct *mm)
+static inline void mmap_assert_write_locked(const struct mm_struct *mm)
 {
-        lockdep_assert_held_write(&mm->mmap_lock);
-        VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+        rwsem_assert_held_write(&mm->mmap_lock);
 }
 
 #ifdef CONFIG_PER_VMA_LOCK
+/*
+ * Drop all currently-held per-VMA locks.
+ * This is called from the mmap_lock implementation directly before releasing
+ * a write-locked mmap_lock (or downgrading it to read-locked).
+ * This should normally NOT be called manually from other places.
+ * If you want to call this manually anyway, keep in mind that this will release
+ * *all* VMA write locks, including ones from further up the stack.
+ */
 static inline void vma_end_write_all(struct mm_struct *mm)
 {
         mmap_assert_write_locked(mm);
         /*
          * Nobody can concurrently modify mm->mm_lock_seq due to exclusive
          * mmap_lock being held.
          * We need RELEASE semantics here to ensure that preceding stores into
          * the VMA take effect before we unlock it with this store.
          * Pairs with ACQUIRE semantics in vma_start_read().
          */
         smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
 }
 #else
 static inline void vma_end_write_all(struct mm_struct *mm) {}
 #endif
 
 static inline void mmap_init_lock(struct mm_struct *mm)
 {
         init_rwsem(&mm->mmap_lock);
 }
 
 static inline void mmap_write_lock(struct mm_struct *mm)
 {
         __mmap_lock_trace_start_locking(mm, true);
         down_write(&mm->mmap_lock);
         __mmap_lock_trace_acquire_returned(mm, true, true);
 }
 
 static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
 {
         __mmap_lock_trace_start_locking(mm, true);
         down_write_nested(&mm->mmap_lock, subclass);
         __mmap_lock_trace_acquire_returned(mm, true, true);
 }
 
 static inline int mmap_write_lock_killable(struct mm_struct *mm)
 {
         int ret;
 
         __mmap_lock_trace_start_locking(mm, true);
         ret = down_write_killable(&mm->mmap_lock);
         __mmap_lock_trace_acquire_returned(mm, true, ret == 0);
         return ret;
 }
 
-static inline bool mmap_write_trylock(struct mm_struct *mm)
-{
-        bool ret;
-
-        __mmap_lock_trace_start_locking(mm, true);
-        ret = down_write_trylock(&mm->mmap_lock) != 0;
-        __mmap_lock_trace_acquire_returned(mm, true, ret);
-        return ret;
-}
-
 static inline void mmap_write_unlock(struct mm_struct *mm)
 {
         __mmap_lock_trace_released(mm, true);
         vma_end_write_all(mm);
         up_write(&mm->mmap_lock);
 }
 
 static inline void mmap_write_downgrade(struct mm_struct *mm)
 {
         __mmap_lock_trace_acquire_returned(mm, false, true);
         vma_end_write_all(mm);
         downgrade_write(&mm->mmap_lock);
 }
 
 static inline void mmap_read_lock(struct mm_struct *mm)
 {
         __mmap_lock_trace_start_locking(mm, false);
         down_read(&mm->mmap_lock);
         __mmap_lock_trace_acquire_returned(mm, false, true);
 }
 
 static inline int mmap_read_lock_killable(struct mm_struct *mm)
 {
         int ret;
 
         __mmap_lock_trace_start_locking(mm, false);
         ret = down_read_killable(&mm->mmap_lock);
         __mmap_lock_trace_acquire_returned(mm, false, ret == 0);
         return ret;
 }
 
 static inline bool mmap_read_trylock(struct mm_struct *mm)
 {
         bool ret;
 
         __mmap_lock_trace_start_locking(mm, false);
         ret = down_read_trylock(&mm->mmap_lock) != 0;
         __mmap_lock_trace_acquire_returned(mm, false, ret);
         return ret;
 }
 
 static inline void mmap_read_unlock(struct mm_struct *mm)
 {
         __mmap_lock_trace_released(mm, false);
         up_read(&mm->mmap_lock);
 }
 
 static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
 {
         __mmap_lock_trace_released(mm, false);
         up_read_non_owner(&mm->mmap_lock);
 }
 
 static inline int mmap_lock_is_contended(struct mm_struct *mm)
 {
         return rwsem_is_contended(&mm->mmap_lock);
 }
 
 #endif /* _LINUX_MMAP_LOCK_H */
 
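
For context, the helpers in this header are thin wrappers around mm->mmap_lock that callers must pair themselves. The sketch below is not part of the file shown and is only a rough illustration of how a hypothetical in-kernel helper might take the lock in read mode around a VMA lookup; the function name inspect_vma_at and its error codes are invented for this example. mmap_read_lock_killable(), mmap_assert_locked() and mmap_read_unlock() come from this header, while find_vma() is declared in linux/mm.h.

    /*
     * Illustrative sketch only -- not part of mmap_lock.h. Looks up the VMA
     * covering @addr while holding mm->mmap_lock for reading.
     */
    #include <linux/mm.h>
    #include <linux/mmap_lock.h>

    static int inspect_vma_at(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;
            int ret = 0;

            /* Sleeps until the lock is taken or a fatal signal arrives. */
            if (mmap_read_lock_killable(mm))
                    return -EINTR;

            mmap_assert_locked(mm);         /* the read lock is now held */

            vma = find_vma(mm, addr);       /* requires mmap_lock to be held */
            if (!vma || vma->vm_start > addr)
                    ret = -EFAULT;          /* no VMA covers @addr */

            mmap_read_unlock(mm);
            return ret;
    }

The killable variant is used here so that a task waiting on a heavily contended mmap_lock can still be terminated; plain mmap_read_lock() would be the uninterruptible equivalent.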
