/* SPDX-License-Identifier: GPL-2.0 */
/* rwsem.h: R/W semaphores, public interface
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 */

#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H

#include <linux/linkage.h>

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/cleanup.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname)			\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_SLEEP,	\
	},
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#ifndef CONFIG_PREEMPT_RT

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif

/*
 * For an uncontended rwsem, count and owner are the only fields a task
 * needs to touch when acquiring the rwsem. So they are put next to each
 * other to increase the chance that they will share the same cacheline.
 *
 * In a contended rwsem, the owner is likely the most frequently accessed
 * field in the structure as the optimistic waiter that holds the osq lock
 * will spin on owner. For an embedded rwsem, other hot fields in the
 * containing structure should be moved further away from the rwsem to
 * reduce the chance that they will share the same cacheline, causing
 * a cacheline bouncing problem.
 */
struct rw_semaphore {
	atomic_long_t count;
	/*
	 * Write owner or one of the read owners, as well as flags regarding
	 * the current state of the rwsem. Can be used as a speculative
	 * check to see if the write owner is running on the cpu.
	 */
	atomic_long_t owner;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
	raw_spinlock_t wait_lock;
	struct list_head wait_list;
#ifdef CONFIG_DEBUG_RWSEMS
	void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};
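
/*
 * Example (illustrative sketch, not part of this header): one way to
 * apply the layout advice above when embedding an rwsem. The struct
 * and field names are hypothetical; the padding simply keeps the
 * frequently-written counters off the cachelines holding the rwsem's
 * count and owner words. ____cacheline_aligned on the hot fields is a
 * more explicit alternative.
 *
 *	struct hot_object {
 *		struct rw_semaphore	sem;
 *		char			pad[64];
 *		atomic_long_t		hits;
 *		atomic_long_t		misses;
 *	};
 */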

#define RWSEM_UNLOCKED_VALUE		0UL
#define RWSEM_WRITER_LOCKED		(1UL << 0)
#define __RWSEM_COUNT_INIT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return atomic_long_read(&sem->count) != RWSEM_UNLOCKED_VALUE;
}

static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE);
}

static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED));
}

/* Common initializer macros and functions */

#ifdef CONFIG_DEBUG_RWSEMS
# define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
#else
# define __RWSEM_DEBUG_INIT(lockname)
#endif

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) .osq = OSQ_LOCK_UNLOCKED,
#else
#define __RWSEM_OPT_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)				\
	{ __RWSEM_COUNT_INIT(name),				\
	  .owner = ATOMIC_LONG_INIT(0),				\
	  __RWSEM_OPT_INIT(name)				\
	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
	  __RWSEM_DEBUG_INIT(name)				\
	  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)
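
/*
 * Example (illustrative sketch, not part of this header): an rwsem with
 * static storage duration can be declared and initialized in one step
 * with DECLARE_RWSEM(); a dynamically allocated or embedded one must go
 * through init_rwsem() so that lockdep gets a lock class key. Names are
 * hypothetical.
 *
 *	static DECLARE_RWSEM(config_rwsem);
 *
 *	struct session {
 *		struct rw_semaphore lock;
 *	};
 *
 *	static void session_init(struct session *s)
 *	{
 *		init_rwsem(&s->lock);
 *	}
 */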

/*
 * This is the same regardless of which rwsem implementation is being used.
 * It is just a heuristic meant to be called by somebody already holding the
 * rwsem to see if somebody from an incompatible type wants access to the
 * lock.
 */
static inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return !list_empty(&sem->wait_list);
}

#else /* !CONFIG_PREEMPT_RT */

#include <linux/rwbase_rt.h>

struct rw_semaphore {
	struct rwbase_rt	rwbase;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#define __RWSEM_INITIALIZER(name)				\
	{							\
		.rwbase = __RWBASE_INITIALIZER(name),		\
		__RWSEM_DEP_MAP_INIT(name)			\
	}

#define DECLARE_RWSEM(lockname) \
	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)

extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)

static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
{
	return rw_base_is_locked(&sem->rwbase);
}

static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!rwsem_is_locked(sem));
}

static __always_inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!rw_base_is_write_locked(&sem->rwbase));
}

static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return rw_base_is_contended(&sem->rwbase);
}

#endif /* CONFIG_PREEMPT_RT */

/*
 * The functions below are the same for all rwsem implementations including
 * the RT specific variant.
 */

static inline void rwsem_assert_held(const struct rw_semaphore *sem)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		lockdep_assert_held(sem);
	else
		rwsem_assert_held_nolockdep(sem);
}

static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		lockdep_assert_held_write(sem);
	else
		rwsem_assert_held_write_nolockdep(sem);
}

/*
 * lock for reading
 */
extern void down_read(struct rw_semaphore *sem);
extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem);

/*
 * lock for writing
 */
extern void down_write(struct rw_semaphore *sem);
extern int __must_check down_write_killable(struct rw_semaphore *sem);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem);

/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem);
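
/*
 * Example (illustrative sketch, not part of this header): a long-running
 * reader that periodically drops the lock when rwsem_is_contended()
 * reports waiters, giving a queued writer a chance to get in. Names are
 * hypothetical.
 *
 *	static DECLARE_RWSEM(table_rwsem);
 *
 *	static void scan_table(void)
 *	{
 *		down_read(&table_rwsem);
 *		while (scan_one_entry()) {
 *			if (rwsem_is_contended(&table_rwsem)) {
 *				up_read(&table_rwsem);
 *				cond_resched();
 *				down_read(&table_rwsem);
 *			}
 *		}
 *		up_read(&table_rwsem);
 *	}
 */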

/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem);

DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)

DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))

/*
 * downgrade write lock to read lock
 */
extern void downgrade_write(struct rw_semaphore *sem);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/locking/lockdep-design.rst for more details.)
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);

# define down_write_nest_lock(sem, nest_lock)			\
do {								\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
} while (0)

/*
 * Take/release a lock when the task releasing it will not be the task
 * that acquired it.
 *
 * [ This API should be avoided as much as possible - the
 *   proper abstraction for this case is completions. ]
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
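
/*
 * Example (illustrative sketch, not part of this header): the guard
 * definitions above enable scope-based locking via <linux/cleanup.h>;
 * the rwsem is released automatically when the guard variable goes out
 * of scope. Names are hypothetical.
 *
 *	static int read_value(struct config *c)
 *	{
 *		guard(rwsem_read)(&c->rwsem);
 *		return c->value;
 *	}
 */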
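
/*
 * Example (illustrative sketch, not part of this header): two locks of
 * the same lock class taken in a fixed parent-then-child order, with the
 * inner acquisition annotated for lockdep via a _nested() subclass.
 * SINGLE_DEPTH_NESTING comes from <linux/lockdep.h>; the struct is
 * hypothetical.
 *
 *	struct node {
 *		struct rw_semaphore	lock;
 *		struct node		*child;
 *	};
 *
 *	static void lock_pair(struct node *parent)
 *	{
 *		down_write(&parent->lock);
 *		down_write_nested(&parent->child->lock, SINGLE_DEPTH_NESTING);
 *	}
 */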
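
/*
 * Example (illustrative sketch, not part of this header): the non-owner
 * variants allow the read lock to be released from a context other than
 * the one that acquired it, e.g. a deferred completion path. All names
 * are hypothetical.
 *
 *	static void submit_request(struct my_request *rq)
 *	{
 *		down_read_non_owner(&rq->dev->pages_rwsem);
 *		queue_work(my_wq, &rq->work);
 *	}
 *
 *	static void request_done(struct work_struct *work)
 *	{
 *		struct my_request *rq = container_of(work, struct my_request, work);
 *
 *		finish_request(rq);
 *		up_read_non_owner(&rq->dev->pages_rwsem);
 *	}
 */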
#else
# define down_read_nested(sem, subclass)		down_read(sem)
# define down_read_killable_nested(sem, subclass)	down_read_killable(sem)
# define down_write_nest_lock(sem, nest_lock)	down_write(sem)
# define down_write_nested(sem, subclass)	down_write(sem)
# define down_write_killable_nested(sem, subclass)	down_write_killable(sem)
# define down_read_non_owner(sem)		down_read(sem)
# define up_read_non_owner(sem)			up_read(sem)
#endif

#endif /* _LINUX_RWSEM_H */