/* SPDX-License-Identifier: GPL-2.0 */
/* rwsem.h: R/W semaphores, public interface
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 */

#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H

#include <linux/linkage.h>

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/cleanup.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname)			\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_SLEEP,	\
	},
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#ifndef CONFIG_PREEMPT_RT

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif

/*
 * For an uncontended rwsem, count and owner are the only fields a task
 * needs to touch when acquiring the rwsem. So they are put next to each
 * other to increase the chance that they will share the same cacheline.
 *
 * In a contended rwsem, the owner is likely the most frequently accessed
 * field in the structure as the optimistic waiter that holds the osq lock
 * will spin on owner. For an embedded rwsem, other hot fields in the
 * containing structure should be moved further away from the rwsem to
 * reduce the chance that they will share the same cacheline causing
 * cacheline bouncing problem.
 */
struct rw_semaphore {
	atomic_long_t count;
	/*
	 * Write owner or one of the read owners as well as flags regarding
	 * the current state of the rwsem. Can be used as a speculative
	 * check to see if the write owner is running on the cpu.
	 */
	atomic_long_t owner;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
	raw_spinlock_t wait_lock;
	struct list_head wait_list;
#ifdef CONFIG_DEBUG_RWSEMS
	void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#define RWSEM_UNLOCKED_VALUE		0UL
#define RWSEM_WRITER_LOCKED		(1UL << 0)
#define __RWSEM_COUNT_INIT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return atomic_long_read(&sem->count) != RWSEM_UNLOCKED_VALUE;
}

static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE);
}

static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED));
}
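
/*
 * Illustrative sketch (not part of this header's API): what the nolockdep
 * assertions above test on the count word of the non-RT implementation.
 * "demo_sem" is a hypothetical lock used only for this example.
 *
 *	static DECLARE_RWSEM(demo_sem);
 *
 *	down_read(&demo_sem);
 *	rwsem_assert_held_nolockdep(&demo_sem);		// ok: count != 0
 *	up_read(&demo_sem);
 *
 *	down_write(&demo_sem);
 *	rwsem_assert_held_write_nolockdep(&demo_sem);	// ok: writer bit set
 *	up_write(&demo_sem);
 */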

/* Common initializer macros and functions */

#ifdef CONFIG_DEBUG_RWSEMS
# define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
#else
# define __RWSEM_DEBUG_INIT(lockname)
#endif

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) .osq = OSQ_LOCK_UNLOCKED,
#else
#define __RWSEM_OPT_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)				\
	{ __RWSEM_COUNT_INIT(name),				\
	  .owner = ATOMIC_LONG_INIT(0),				\
	  __RWSEM_OPT_INIT(name)				\
	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
	  __RWSEM_DEBUG_INIT(name)				\
	  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)

/*
 * This is the same regardless of which rwsem implementation is being used.
 * It is just a heuristic meant to be called by somebody already holding the
 * rwsem to see if somebody from an incompatible type is wanting access to
 * the lock.
 */
static inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return !list_empty(&sem->wait_list);
}

#else /* !CONFIG_PREEMPT_RT */

#include <linux/rwbase_rt.h>

struct rw_semaphore {
	struct rwbase_rt	rwbase;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#define __RWSEM_INITIALIZER(name)				\
	{							\
		.rwbase = __RWBASE_INITIALIZER(name),		\
		__RWSEM_DEP_MAP_INIT(name)			\
	}

#define DECLARE_RWSEM(lockname) \
	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)

extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)

static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
{
	return rw_base_is_locked(&sem->rwbase);
}

static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!rwsem_is_locked(sem));
}

static __always_inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!rw_base_is_write_locked(&sem->rwbase));
}

static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return rw_base_is_contended(&sem->rwbase);
}

#endif /* CONFIG_PREEMPT_RT */

/*
 * The functions below are the same for all rwsem implementations including
 * the RT specific variant.
 */

static inline void rwsem_assert_held(const struct rw_semaphore *sem)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		lockdep_assert_held(sem);
	else
		rwsem_assert_held_nolockdep(sem);
}

static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		lockdep_assert_held_write(sem);
	else
		rwsem_assert_held_write_nolockdep(sem);
}
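
/*
 * Usage sketch (illustrative only; "struct my_device" is a hypothetical
 * structure): rwsems are either defined statically with DECLARE_RWSEM() or
 * embedded in another object and set up at runtime with init_rwsem(), which
 * also gives the lock its own lockdep class key.
 *
 *	static DECLARE_RWSEM(config_sem);	// static definition
 *
 *	struct my_device {
 *		struct rw_semaphore sem;
 *	};
 *
 *	static void my_device_setup(struct my_device *dev)
 *	{
 *		init_rwsem(&dev->sem);		// runtime initialization
 *	}
 */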
47 rwsemtrace(sem,"Leaving down_read"); 205 rwsem_assert_held_write_nolock << 206 } 48 } 207 49 208 /* 50 /* 209 * lock for reading << 210 */ << 211 extern void down_read(struct rw_semaphore *sem << 212 extern int __must_check down_read_interruptibl << 213 extern int __must_check down_read_killable(str << 214 << 215 /* << 216 * trylock for reading -- returns 1 if success 51 * trylock for reading -- returns 1 if successful, 0 if contention 217 */ 52 */ 218 extern int down_read_trylock(struct rw_semapho !! 53 static inline int down_read_trylock(struct rw_semaphore *sem) >> 54 { >> 55 int ret; >> 56 rwsemtrace(sem,"Entering down_read_trylock"); >> 57 ret = __down_read_trylock(sem); >> 58 rwsemtrace(sem,"Leaving down_read_trylock"); >> 59 return ret; >> 60 } 219 61 220 /* 62 /* 221 * lock for writing 63 * lock for writing 222 */ 64 */ 223 extern void down_write(struct rw_semaphore *se !! 65 static inline void down_write(struct rw_semaphore *sem) 224 extern int __must_check down_write_killable(st !! 66 { >> 67 rwsemtrace(sem,"Entering down_write"); >> 68 __down_write(sem); >> 69 rwsemtrace(sem,"Leaving down_write"); >> 70 } 225 71 226 /* 72 /* 227 * trylock for writing -- returns 1 if success 73 * trylock for writing -- returns 1 if successful, 0 if contention 228 */ 74 */ 229 extern int down_write_trylock(struct rw_semaph !! 75 static inline int down_write_trylock(struct rw_semaphore *sem) >> 76 { >> 77 int ret; >> 78 rwsemtrace(sem,"Entering down_write_trylock"); >> 79 ret = __down_write_trylock(sem); >> 80 rwsemtrace(sem,"Leaving down_write_trylock"); >> 81 return ret; >> 82 } 230 83 231 /* 84 /* 232 * release a read lock 85 * release a read lock 233 */ 86 */ 234 extern void up_read(struct rw_semaphore *sem); !! 87 static inline void up_read(struct rw_semaphore *sem) >> 88 { >> 89 rwsemtrace(sem,"Entering up_read"); >> 90 __up_read(sem); >> 91 rwsemtrace(sem,"Leaving up_read"); >> 92 } 235 93 236 /* 94 /* 237 * release a write lock 95 * release a write lock 238 */ 96 */ 239 extern void up_write(struct rw_semaphore *sem) !! 97 static inline void up_write(struct rw_semaphore *sem) 240 !! 98 { 241 DEFINE_GUARD(rwsem_read, struct rw_semaphore * !! 99 rwsemtrace(sem,"Entering up_write"); 242 DEFINE_GUARD_COND(rwsem_read, _try, down_read_ !! 100 __up_write(sem); 243 DEFINE_GUARD_COND(rwsem_read, _intr, down_read !! 101 rwsemtrace(sem,"Leaving up_write"); 244 !! 102 } 245 DEFINE_GUARD(rwsem_write, struct rw_semaphore << 246 DEFINE_GUARD_COND(rwsem_write, _try, down_writ << 247 << 248 /* << 249 * downgrade write lock to read lock << 250 */ << 251 extern void downgrade_write(struct rw_semaphor << 252 << 253 #ifdef CONFIG_DEBUG_LOCK_ALLOC << 254 /* << 255 * nested locking. NOTE: rwsems are not allowe << 256 * (which occurs if the same task tries to acq << 257 * lock instance multiple times), but multiple << 258 * same lock class might be taken, if the orde << 259 * is always the same. This ordering rule can << 260 * to lockdep via the _nested() APIs, but enum << 261 * subclasses that are used. 

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/locking/lockdep-design.rst for more details.)
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);

# define down_write_nest_lock(sem, nest_lock)			\
do {								\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
} while (0)

/*
 * Take/release a lock when not the owner will release it.
 *
 * [ This API should be avoided as much as possible - the
 *   proper abstraction for this case is completions. ]
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
#else
# define down_read_nested(sem, subclass)		down_read(sem)
# define down_read_killable_nested(sem, subclass)	down_read_killable(sem)
# define down_write_nest_lock(sem, nest_lock)		down_write(sem)
# define down_write_nested(sem, subclass)		down_write(sem)
# define down_write_killable_nested(sem, subclass)	down_write_killable(sem)
# define down_read_non_owner(sem)			down_read(sem)
# define up_read_non_owner(sem)				up_read(sem)
#endif
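
/*
 * Usage sketch (illustrative only; "struct my_node" and both instances are
 * hypothetical): two locks initialized from the same init_rwsem() call site
 * share a lock class, so taking one while holding the other looks like
 * recursion to lockdep unless the inner lock is given a subclass.
 *
 *	struct my_node {
 *		struct rw_semaphore sem;
 *	};
 *
 *	// both initialized via init_rwsem(&node->sem) => same lock class
 *
 *	down_write(&parent->sem);
 *	down_write_nested(&child->sem, SINGLE_DEPTH_NESTING);
 *	...
 *	up_write(&child->sem);
 *	up_write(&parent->sem);
 */

#endif /* _LINUX_RWSEM_H */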