/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 *     that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	u16				distance;
	/* bitmap of different dependencies from head to this */
	u8				dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8				only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)
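
/*
 * Example (illustrative, not part of the upstream header): code whose
 * locking must be hidden from the validator can be bracketed like this,
 * where do_selfcontained_locking() is a hypothetical helper:
 *
 *	lockdep_off();
 *	do_selfcontained_locking();
 *	lockdep_on();
 *
 * Calls may nest: each lockdep_off() adds LOCKDEP_OFF to the recursion
 * counter and each lockdep_on() subtracts it again.
 */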

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, u8 inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
				    struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_type(&(lock)->dep_map, name, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, sub,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_type(&(lock)->dep_map, (lock)->dep_map.name,	\
			      (lock)->dep_map.key, sub,			\
			      (lock)->dep_map.wait_type_inner,		\
			      (lock)->dep_map.wait_type_outer,		\
			      (lock)->dep_map.lock_type)

/**
 * lockdep_set_novalidate_class: disable checking of lock ordering on a given
 * lock
 * @lock: Lock to mark
 *
 * Lockdep will still record that this lock has been taken, and print held
 * instances when dumping locks
 */
#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)

/**
 * lockdep_set_notrack_class: disable lockdep tracking of a given lock entirely
 * @lock: Lock to mark
 *
 * Bigger hammer than lockdep_set_novalidate_class: so far just for bcachefs,
 * which takes more locks than lockdep is able to track (48).
 */
#define lockdep_set_notrack_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_track__, #lock)

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
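
/*
 * Example (illustrative): when two locks of the same type legitimately
 * nest, the default per-init-site class makes lockdep report a false
 * self-deadlock. Splitting one instance into its own class with a
 * hypothetical key avoids that:
 *
 *	static struct lock_class_key inner_key;
 *
 *	spin_lock_init(&dev->outer);
 *	spin_lock_init(&dev->inner);
 *	lockdep_set_class(&dev->inner, &inner_key);
 */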

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

extern void lock_sync(struct lockdep_map *lock, unsigned int subclass,
		      int read, int check, struct lockdep_map *nest_lock,
		      unsigned long ip);

/* lock_is_held_type() returns */
#define LOCK_STATE_UNKNOWN	-1
#define LOCK_STATE_NOT_HELD	0
#define LOCK_STATE_HELD		1

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

#define lock_set_novalidate_class(l, n, i) \
	lock_set_class(l, n, &__lockdep_no_validate__, 0, i)

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert(cond)		\
	do { WARN_ON(debug_locks && !(cond)); } while (0)

#define lockdep_assert_once(cond)	\
	do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)

#define lockdep_assert_held(l)		\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_not_held(l)	\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)

#define lockdep_assert_held_write(l)	\
	lockdep_assert(lockdep_is_held_type(l, 0))

#define lockdep_assert_held_read(l)	\
	lockdep_assert(lockdep_is_held_type(l, 1))

#define lockdep_assert_held_once(l)	\
	lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_none_held_once()	\
	lockdep_assert_once(!current->lockdep_depth)

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))

/*
 * Must use lock_map_acquire_try() with override maps to avoid
 * lockdep thinking they participate in the block chain.
 */
#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
	struct lockdep_map _name = {			\
		.name = #_name "-wait-type-override",	\
		.wait_type_inner = _wait_type,		\
		.lock_type = LD_LOCK_WAIT_OVERRIDE, }
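
/*
 * Example (illustrative): internal helpers commonly document their locking
 * contract with the assertions above, e.g. for a hypothetical struct foo:
 *
 *	static void foo_update_stats(struct foo *f)
 *	{
 *		lockdep_assert_held(&f->lock);
 *		f->nr_updates++;
 *	}
 *
 * With lockdep enabled this warns when a caller does not hold f->lock;
 * without CONFIG_LOCKDEP it compiles down to nothing.
 */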

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, key, s, i)	do { (void)(key); } while (0)
# define lock_set_novalidate_class(l, n, i)	do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)
#define lockdep_set_notrack_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
 * case since the result is not well defined and the caller should rather
 * #ifdef the call himself.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

/*
 * Dummy forward declarations, allow users to write less ifdef-y code
 * and depend on dead code elimination.
 */
extern int lock_is_held(const void *);
extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert(c)			do { } while (0)
#define lockdep_assert_once(c)			do { } while (0)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_not_held(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
#define lockdep_assert_none_held_once()		do { } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
	struct lockdep_map __maybe_unused _name = {}

#endif /* !LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
void lockdep_set_lock_cmp_fn(struct lockdep_map *, lock_cmp_fn, lock_print_fn);

#define lock_set_cmp_fn(lock, ...)	lockdep_set_lock_cmp_fn(&(lock)->dep_map, __VA_ARGS__)
#else
#define lock_set_cmp_fn(lock, ...)	do { } while (0)
#endif
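
/*
 * Example (illustrative): under PROVE_LOCKING, nesting of locks within one
 * class can be validated with a comparison function instead of subclasses.
 * A hypothetical scheme requiring ascending address order might look like:
 *
 *	static int foo_lock_cmp_fn(const struct lockdep_map *a,
 *				   const struct lockdep_map *b)
 *	{
 *		return a < b ? -1 : 1;
 *	}
 *
 *	lock_set_cmp_fn(&foo->lock, foo_lock_cmp_fn, NULL);
 */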

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
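
/*
 * Example: the spinlock implementation wires its raw trylock fast path and
 * blocking slow path through LOCK_CONTENDED (see
 * include/linux/spinlock_api_smp.h):
 *
 *	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 *
 * With CONFIG_LOCK_STAT=n this degenerates to do_raw_spin_lock(lock); with
 * CONFIG_LOCK_STAT=y a failed trylock records the contention point and the
 * eventual acquisition is recorded by lock_acquired().
 */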

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)					\
do {									\
	if (read_lock_is_recursive())					\
		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
	else								\
		lock_acquire_shared(l, s, t, NULL, i);			\
} while (0)

#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_try(l)			lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)
#define lock_map_sync(l)			lock_sync(l, 0, 0, 1, NULL, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_no_hardirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && (this_cpu_read(hardirq_context) || \
					   !this_cpu_read(hardirqs_enabled))); \
} while (0)

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)

/*
 * Acceptable for protecting per-CPU resources accessed from BH.
 * Much like in_softirq() - semantics are ambiguous, use carefully.
 */
#define lockdep_assert_in_softirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled			&&		\
		     (!in_softirq() || in_irq() || in_nmi()));		\
} while (0)

extern void lockdep_assert_in_softirq_func(void);
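
/*
 * Example (illustrative): an API that takes a lock only on a rare path can
 * still declare the dependency unconditionally, so an ordering violation is
 * caught even when the slow path never runs. For a hypothetical allocator:
 *
 *	void *foo_alloc(struct foo *f)
 *	{
 *		might_lock(&f->lock);	// only the slow path takes f->lock
 *		...
 *	}
 */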

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
# define lockdep_assert_no_hardirq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
# define lockdep_assert_in_softirq() do { } while (0)
# define lockdep_assert_in_softirq_func() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config),	\
			  "Not in threaded context on PREEMPT_RT as expected\n");	\
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */