
TOMOYO Linux Cross Reference
Linux/include/linux/mmap_lock.h


Diff markup

Differences between /include/linux/mmap_lock.h (Architecture sparc) and /include/linux/mmap_lock.h (Architecture m68k): the two views are identical, because this generic header in include/linux/ is shared by all architectures. The file is therefore reproduced once below.


#ifndef _LINUX_MMAP_LOCK_H
#define _LINUX_MMAP_LOCK_H

#include <linux/lockdep.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/rwsem.h>
#include <linux/tracepoint-defs.h>
#include <linux/types.h>

#define MMAP_LOCK_INITIALIZER(name) \
        .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),

DECLARE_TRACEPOINT(mmap_lock_start_locking);
DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
DECLARE_TRACEPOINT(mmap_lock_released);

#ifdef CONFIG_TRACING

void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
                                           bool success);
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);

static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
                                                   bool write)
{
        if (tracepoint_enabled(mmap_lock_start_locking))
                __mmap_lock_do_trace_start_locking(mm, write);
}

static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
                                                      bool write, bool success)
{
        if (tracepoint_enabled(mmap_lock_acquire_returned))
                __mmap_lock_do_trace_acquire_returned(mm, write, success);
}

static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
        if (tracepoint_enabled(mmap_lock_released))
                __mmap_lock_do_trace_released(mm, write);
}

#else /* !CONFIG_TRACING */

static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
                                                   bool write)
{
}

static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
                                                      bool write, bool success)
{
}

static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
}

#endif /* CONFIG_TRACING */

static inline void mmap_assert_locked(const struct mm_struct *mm)
{
        rwsem_assert_held(&mm->mmap_lock);
}

static inline void mmap_assert_write_locked(const struct mm_struct *mm)
{
        rwsem_assert_held_write(&mm->mmap_lock);
}

#ifdef CONFIG_PER_VMA_LOCK
/*
 * Drop all currently-held per-VMA locks.
 * This is called from the mmap_lock implementation directly before releasing
 * a write-locked mmap_lock (or downgrading it to read-locked).
 * This should normally NOT be called manually from other places.
 * If you want to call this manually anyway, keep in mind that this will release
 * *all* VMA write locks, including ones from further up the stack.
 */
static inline void vma_end_write_all(struct mm_struct *mm)
{
        mmap_assert_write_locked(mm);
        /*
         * Nobody can concurrently modify mm->mm_lock_seq due to exclusive
         * mmap_lock being held.
         * We need RELEASE semantics here to ensure that preceding stores into
         * the VMA take effect before we unlock it with this store.
         * Pairs with ACQUIRE semantics in vma_start_read().
         */
        smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
}
#else
static inline void vma_end_write_all(struct mm_struct *mm) {}
#endif

static inline void mmap_init_lock(struct mm_struct *mm)
{
        init_rwsem(&mm->mmap_lock);
}

static inline void mmap_write_lock(struct mm_struct *mm)
{
        __mmap_lock_trace_start_locking(mm, true);
        down_write(&mm->mmap_lock);
        __mmap_lock_trace_acquire_returned(mm, true, true);
}

static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
        __mmap_lock_trace_start_locking(mm, true);
        down_write_nested(&mm->mmap_lock, subclass);
        __mmap_lock_trace_acquire_returned(mm, true, true);
}

static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
        int ret;

        __mmap_lock_trace_start_locking(mm, true);
        ret = down_write_killable(&mm->mmap_lock);
        __mmap_lock_trace_acquire_returned(mm, true, ret == 0);
        return ret;
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
        __mmap_lock_trace_released(mm, true);
        vma_end_write_all(mm);
        up_write(&mm->mmap_lock);
}

static inline void mmap_write_downgrade(struct mm_struct *mm)
{
        __mmap_lock_trace_acquire_returned(mm, false, true);
        vma_end_write_all(mm);
        downgrade_write(&mm->mmap_lock);
}

static inline void mmap_read_lock(struct mm_struct *mm)
{
        __mmap_lock_trace_start_locking(mm, false);
        down_read(&mm->mmap_lock);
        __mmap_lock_trace_acquire_returned(mm, false, true);
}

static inline int mmap_read_lock_killable(struct mm_struct *mm)
{
        int ret;

        __mmap_lock_trace_start_locking(mm, false);
        ret = down_read_killable(&mm->mmap_lock);
        __mmap_lock_trace_acquire_returned(mm, false, ret == 0);
        return ret;
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
        bool ret;

        __mmap_lock_trace_start_locking(mm, false);
        ret = down_read_trylock(&mm->mmap_lock) != 0;
        __mmap_lock_trace_acquire_returned(mm, false, ret);
        return ret;
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
        __mmap_lock_trace_released(mm, false);
        up_read(&mm->mmap_lock);
}

static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
        __mmap_lock_trace_released(mm, false);
        up_read_non_owner(&mm->mmap_lock);
}

static inline int mmap_lock_is_contended(struct mm_struct *mm)
{
        return rwsem_is_contended(&mm->mmap_lock);
}

#endif /* _LINUX_MMAP_LOCK_H */
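As a usage sketch (not part of this header), the write-side wrappers might be used as below. The caller takes the lock with mmap_write_lock_killable(), then downgrades to a read lock for the remainder of the work; as the header documents, vma_end_write_all() runs inside mmap_write_unlock() and mmap_write_downgrade(), so any per-VMA write locks are dropped at that point. The helpers update_vmas() and scan_vmas() are hypothetical placeholders for the caller's own logic, not kernel APIs.

#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

/* Placeholders for the caller's own VMA-handling logic (not kernel APIs). */
void update_vmas(struct mm_struct *mm);
void scan_vmas(struct mm_struct *mm);

static int modify_address_space(struct mm_struct *mm)
{
        /* Sleeps until the lock is held for writing, or -EINTR on a fatal signal. */
        int ret = mmap_write_lock_killable(mm);

        if (ret)
                return ret;

        update_vmas(mm);                /* placeholder: modify VMAs under the write lock */

        /*
         * Downgrading (like unlocking) calls vma_end_write_all() first, so
         * per-VMA write locks taken above are released here as well.
         */
        mmap_write_downgrade(mm);

        scan_vmas(mm);                  /* placeholder: read-only walk under the read lock */

        mmap_read_unlock(mm);
        return 0;
}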
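A similar sketch for the read-side wrappers: attempt the non-blocking mmap_read_trylock() first and fall back to mmap_read_lock_killable() when contended, then look up a VMA with find_vma() (which requires the mmap lock to be held). inspect_vma() is a hypothetical placeholder for the caller's own logic.

#include <linux/mm.h>           /* find_vma() */
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

/* Placeholder for the caller's own logic (not a kernel API). */
void inspect_vma(struct vm_area_struct *vma);

static int inspect_mapping(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;

        if (!mmap_read_trylock(mm)) {
                /* Contended: sleep for the lock, giving up on a fatal signal. */
                int ret = mmap_read_lock_killable(mm);

                if (ret)
                        return ret;
        }

        /* find_vma() returns the first VMA with vm_end > addr, or NULL. */
        vma = find_vma(mm, addr);
        if (vma && vma->vm_start <= addr)
                inspect_vma(vma);       /* addr falls inside this VMA */

        mmap_read_unlock(mm);
        return 0;
}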
