/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES		8UL

#include <linux/types.h>

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+2*4)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently main class (subclass == 0) and single depth subclass
 * are cached in lockdep_map. This optimization is mainly targeting
 * on rq->lock. double_rq_lock() acquires this highly competitive with
 * single depth.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
	/*
	 * Whether it's a crosslock.
	 */
	int				cross;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base        : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
	/*
	 * Generation id.
	 *
	 * A value of cross_gen_id will be stored when holding this,
	 * which is globally increased whenever each crosslock is held.
	 */
	unsigned int gen_id;
#endif
};

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#define MAX_XHLOCK_TRACE_ENTRIES 5

/*
 * This is for keeping locks waiting for commit so that true dependencies
 * can be added at commit step.
 */
struct hist_lock {
	/*
	 * Id for each entry in the ring buffer. This is used to
	 * decide whether the ring buffer was overwritten or not.
	 *
	 * For example,
	 *
	 *           |<----------- hist_lock ring buffer size ------->|
	 *           pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
	 * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
	 *
	 *           where 'p' represents an acquisition in process
	 *           context, 'i' represents an acquisition in irq
	 *           context.
	 *
	 * In this example, the ring buffer was overwritten by
	 * acquisitions in irq context, that should be detected on
	 * rollback or commit.
	 */
	unsigned int hist_id;

	/*
	 * Separate stack_trace data. This will be used at commit step.
	 */
	struct stack_trace	trace;
	unsigned long		trace_entries[MAX_XHLOCK_TRACE_ENTRIES];

	/*
	 * Separate hlock instance. This will be used at commit step.
	 *
	 * TODO: Use a smaller data structure containing only necessary
	 * data. However, we should make lockdep code able to handle the
	 * smaller one first.
	 */
	struct held_lock	hlock;
};

/*
 * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
 * be called instead of lockdep_init_map().
 */
struct cross_lock {
	/*
	 * When more than one acquisition of crosslocks are overlapped,
	 * we have to perform commit for them based on cross_gen_id of
	 * the first acquisition, which allows us to add more true
	 * dependencies.
	 *
	 * Moreover, when no acquisition of a crosslock is in progress,
	 * we should not perform commit because the lock might not exist
	 * any more, which might cause incorrect memory access. So we
	 * have to track the number of acquisitions of a crosslock.
	 */
	int nr_acquire;

	/*
	 * Separate hlock instance. This will be used at commit step.
	 *
	 * TODO: Use a smaller data structure containing only necessary
	 * data. However, we should make lockdep code able to handle the
	 * smaller one first.
	 */
	struct held_lock	hlock;
};

struct lockdep_map_cross {
	struct lockdep_map map;
	struct cross_lock xlock;
};
#endif

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
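/*
 * Editor's illustrative sketch (not part of the original header): one way a
 * driver might use the class/subclass helpers above when objects of the same
 * type nest inside each other. The my_node type, my_node_key and my_move()
 * are hypothetical; spin_lock_nested() and SINGLE_DEPTH_NESTING come from
 * the regular spinlock/lockdep API.
 */
#if 0	/* example only */
#include <linux/spinlock.h>

struct my_node {
	spinlock_t		lock;
	struct my_node		*parent;
};

static struct lock_class_key my_node_key;

static void my_node_init(struct my_node *node)
{
	spin_lock_init(&node->lock);
	/* Give every my_node lock the same, explicitly named class. */
	lockdep_set_class(&node->lock, &my_node_key);
}

static void my_move(struct my_node *parent, struct my_node *child)
{
	spin_lock(&parent->lock);
	/*
	 * Nesting two locks of the same class is annotated with a distinct
	 * subclass, so the validator does not report it as recursion.
	 */
	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
	/* ... move the child ... */
	spin_unlock(&child->lock);
	spin_unlock(&parent->lock);
}
#endif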
#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(struct lockdep_map *lock, int read);

static inline int lock_is_held(struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_exclusive(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
 * case since the result is not well defined and the caller should rather
 * #ifdef the call himself.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_exclusive(l)	do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
				       const char *name,
				       struct lock_class_key *key,
				       int subclass);
extern void lock_commit_crosslock(struct lockdep_map *lock);

/*
 * What we essentially have to initialize is 'nr_acquire'. Other members
 * will be initialized in add_xlock().
 */
#define STATIC_CROSS_LOCK_INIT() \
	{ .nr_acquire = 0,}

#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
	{ .map.name = (_name), .map.key = (void *)(_key), \
	  .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), .cross = 0, }

extern void crossrelease_hist_start(enum xhlock_context_t c);
extern void crossrelease_hist_end(enum xhlock_context_t c);
extern void lockdep_invariant_state(bool force);
extern void lockdep_init_task(struct task_struct *task);
extern void lockdep_free_task(struct task_struct *task);
#else /* !CROSSRELEASE */
#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}
#endif /* CROSSRELEASE */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
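/*
 * Editor's illustrative sketch (not part of the original header): the usual
 * shape of an acquisition slow path that feeds the LOCK_CONTENDED() hooks
 * above. The mylock type and the do_raw_mylock_*() helpers are hypothetical;
 * a real primitive would also call lock_acquire() (e.g. via mutex_acquire())
 * for dependency tracking, which is omitted here.
 */
#if 0	/* example only */
struct mylock {
	arch_spinlock_t		raw_lock;
	struct lockdep_map	dep_map;
};

extern int  do_raw_mylock_trylock(struct mylock *lock);
extern void do_raw_mylock_wait(struct mylock *lock);

static void mylock_acquire_slowpath(struct mylock *lock)
{
	/*
	 * If the trylock fails, lock_contended() records where the wait
	 * started; once the wait path returns, lock_acquired() charges the
	 * wait time to this lock class (only with CONFIG_LOCK_STAT=y).
	 */
	LOCK_CONTENDED(lock, do_raw_mylock_trylock, do_raw_mylock_wait);
}
#endif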
#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */
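/*
 * Editor's illustrative sketch (not part of the original header): using a
 * bare lockdep_map to annotate a synchronization construct that is not a
 * conventional lock, in the style of the workqueue flush annotations. The
 * my_ctx names and the my_work_run()/my_flush() pair are hypothetical; the
 * annotations are STATIC_LOCKDEP_MAP_INIT() and the lock_map_acquire()/
 * lock_map_release() wrappers declared above.
 */
#if 0	/* example only */
static struct lock_class_key my_ctx_key;
static struct lockdep_map my_ctx_map =
	STATIC_LOCKDEP_MAP_INIT("my_ctx", &my_ctx_key);

static void my_work_run(void)
{
	/* The callback runs "inside" the pseudo-lock ... */
	lock_map_acquire(&my_ctx_map);
	/* ... run the user callback here ... */
	lock_map_release(&my_ctx_map);
}

static void my_flush(void)
{
	/*
	 * ... and waiting for the callback also takes the pseudo-lock, so
	 * the validator can prove that flushing from inside the callback,
	 * or while holding a lock the callback needs, would deadlock.
	 */
	lock_map_acquire(&my_ctx_map);
	lock_map_release(&my_ctx_map);
}
#endif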