
TOMOYO Linux Cross Reference
Linux/include/linux/lockdep.h


Diff markup

Differences between /include/linux/lockdep.h (Version linux-6.12-rc7) and /include/linux/lockdep.h (Version linux-5.2.21)


  1 /* SPDX-License-Identifier: GPL-2.0 */              1 /* SPDX-License-Identifier: GPL-2.0 */
  2 /*                                                  2 /*
  3  * Runtime locking correctness validator            3  * Runtime locking correctness validator
  4  *                                                  4  *
  5  *  Copyright (C) 2006,2007 Red Hat, Inc., Ing      5  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  6  *  Copyright (C) 2007 Red Hat, Inc., Peter Zi      6  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  7  *                                                  7  *
  8  * see Documentation/locking/lockdep-design.rs !!   8  * see Documentation/locking/lockdep-design.txt for more details.
  9  */                                                 9  */
 10 #ifndef __LINUX_LOCKDEP_H                          10 #ifndef __LINUX_LOCKDEP_H
 11 #define __LINUX_LOCKDEP_H                          11 #define __LINUX_LOCKDEP_H
 12                                                    12 
 13 #include <linux/lockdep_types.h>               << 
 14 #include <linux/smp.h>                         << 
 15 #include <asm/percpu.h>                        << 
 16                                                << 
 17 struct task_struct;                                13 struct task_struct;
                                                   >>  14 struct lockdep_map;
                                                   >>  15 
                                                   >>  16 /* for sysctl */
                                                   >>  17 extern int prove_locking;
                                                   >>  18 extern int lock_stat;
                                                   >>  19 
                                                   >>  20 #define MAX_LOCKDEP_SUBCLASSES          8UL
                                                   >>  21 
                                                   >>  22 #include <linux/types.h>
 18                                                    23 
 19 #ifdef CONFIG_LOCKDEP                              24 #ifdef CONFIG_LOCKDEP
 20                                                    25 
 21 #include <linux/linkage.h>                         26 #include <linux/linkage.h>
 22 #include <linux/list.h>                            27 #include <linux/list.h>
 23 #include <linux/debug_locks.h>                     28 #include <linux/debug_locks.h>
 24 #include <linux/stacktrace.h>                      29 #include <linux/stacktrace.h>
 25                                                    30 
                                                   >>  31 /*
                                                   >>  32  * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
                                                   >>  33  * the total number of states... :-(
                                                   >>  34  */
                                                   >>  35 #define XXX_LOCK_USAGE_STATES           (1+2*4)
                                                   >>  36 
                                                   >>  37 /*
                                                   >>  38  * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
                                                   >>  39  * cached in the instance of lockdep_map
                                                   >>  40  *
                                                   >>  41  * Currently the main class (subclass == 0) and the single depth subclass
                                                   >>  42  * are cached in lockdep_map. This optimization mainly targets
                                                   >>  43  * rq->lock: double_rq_lock() acquires this highly contended lock with
                                                   >>  44  * single depth.
                                                   >>  45  */
                                                   >>  46 #define NR_LOCKDEP_CACHING_CLASSES      2
                                                   >>  47 
                                                   >>  48 /*
                                                   >>  49  * A lockdep key is associated with each lock object. For static locks we use
                                                   >>  50  * the lock address itself as the key. Dynamically allocated lock objects can
                                                   >>  51  * have a statically or dynamically allocated key. Dynamically allocated lock
                                                   >>  52  * keys must be registered before being used and must be unregistered before
                                                   >>  53  * the key memory is freed.
                                                   >>  54  */
                                                   >>  55 struct lockdep_subclass_key {
                                                   >>  56         char __one_byte;
                                                   >>  57 } __attribute__ ((__packed__));
                                                   >>  58 
                                                   >>  59 /* hash_entry is used to keep track of dynamically allocated keys. */
                                                   >>  60 struct lock_class_key {
                                                   >>  61         union {
                                                   >>  62                 struct hlist_node               hash_entry;
                                                   >>  63                 struct lockdep_subclass_key     subkeys[MAX_LOCKDEP_SUBCLASSES];
                                                   >>  64         };
                                                   >>  65 };
                                                   >>  66 
                                                   >>  67 extern struct lock_class_key __lockdep_no_validate__;
                                                   >>  68 
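
The registration rule above is easiest to see in a sketch. The code below is illustrative only (it assumes CONFIG_LOCKDEP plus <linux/slab.h> and <linux/spinlock.h>; struct my_obj and its helpers are hypothetical), but lockdep_register_key(), lockdep_unregister_key() and lockdep_set_class() are the real interfaces declared in this header:

struct my_obj {
        spinlock_t lock;
        struct lock_class_key key;      /* dynamically allocated key */
};

static struct my_obj *my_obj_create(void)
{
        struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;
        lockdep_register_key(&obj->key);        /* must happen before first use */
        spin_lock_init(&obj->lock);
        lockdep_set_class(&obj->lock, &obj->key);
        return obj;
}

static void my_obj_destroy(struct my_obj *obj)
{
        lockdep_unregister_key(&obj->key);      /* before the key memory is freed */
        kfree(obj);
}
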
                                                   >>  69 struct lock_trace {
                                                   >>  70         unsigned int            nr_entries;
                                                   >>  71         unsigned int            offset;
                                                   >>  72 };
                                                   >>  73 
                                                   >>  74 #define LOCKSTAT_POINTS         4
                                                   >>  75 
                                                   >>  76 /*
                                                   >>  77  * The lock-class itself. The order of the structure members matters.
                                                   >>  78  * reinit_class() zeroes the key member and all subsequent members.
                                                   >>  79  */
                                                   >>  80 struct lock_class {
                                                   >>  81         /*
                                                   >>  82          * class-hash:
                                                   >>  83          */
                                                   >>  84         struct hlist_node               hash_entry;
                                                   >>  85 
                                                   >>  86         /*
                                                   >>  87          * Entry in all_lock_classes when in use. Entry in free_lock_classes
                                                   >>  88          * when not in use. Instances that are being freed are on one of the
                                                   >>  89          * zapped_classes lists.
                                                   >>  90          */
                                                   >>  91         struct list_head                lock_entry;
                                                   >>  92 
                                                   >>  93         /*
                                                   >>  94          * These fields represent a directed graph of lock dependencies,
                                                   >>  95          * to every node we attach a list of "forward" and a list of
                                                   >>  96          * "backward" graph nodes.
                                                   >>  97          */
                                                   >>  98         struct list_head                locks_after, locks_before;
                                                   >>  99 
                                                   >> 100         struct lockdep_subclass_key     *key;
                                                   >> 101         unsigned int                    subclass;
                                                   >> 102         unsigned int                    dep_gen_id;
                                                   >> 103 
                                                   >> 104         /*
                                                   >> 105          * IRQ/softirq usage tracking bits:
                                                   >> 106          */
                                                   >> 107         unsigned long                   usage_mask;
                                                   >> 108         struct lock_trace               usage_traces[XXX_LOCK_USAGE_STATES];
                                                   >> 109 
                                                   >> 110         /*
                                                   >> 111          * Generation counter, when doing certain classes of graph walking,
                                                   >> 112          * to ensure that we check one node only once:
                                                   >> 113          */
                                                   >> 114         int                             name_version;
                                                   >> 115         const char                      *name;
                                                   >> 116 
                                                   >> 117 #ifdef CONFIG_LOCK_STAT
                                                   >> 118         unsigned long                   contention_point[LOCKSTAT_POINTS];
                                                   >> 119         unsigned long                   contending_point[LOCKSTAT_POINTS];
                                                   >> 120 #endif
                                                   >> 121 } __no_randomize_layout;
                                                   >> 122 
                                                   >> 123 #ifdef CONFIG_LOCK_STAT
                                                   >> 124 struct lock_time {
                                                   >> 125         s64                             min;
                                                   >> 126         s64                             max;
                                                   >> 127         s64                             total;
                                                   >> 128         unsigned long                   nr;
                                                   >> 129 };
                                                   >> 130 
                                                   >> 131 enum bounce_type {
                                                   >> 132         bounce_acquired_write,
                                                   >> 133         bounce_acquired_read,
                                                   >> 134         bounce_contended_write,
                                                   >> 135         bounce_contended_read,
                                                   >> 136         nr_bounce_types,
                                                   >> 137 
                                                   >> 138         bounce_acquired = bounce_acquired_write,
                                                   >> 139         bounce_contended = bounce_contended_write,
                                                   >> 140 };
                                                   >> 141 
                                                   >> 142 struct lock_class_stats {
                                                   >> 143         unsigned long                   contention_point[LOCKSTAT_POINTS];
                                                   >> 144         unsigned long                   contending_point[LOCKSTAT_POINTS];
                                                   >> 145         struct lock_time                read_waittime;
                                                   >> 146         struct lock_time                write_waittime;
                                                   >> 147         struct lock_time                read_holdtime;
                                                   >> 148         struct lock_time                write_holdtime;
                                                   >> 149         unsigned long                   bounces[nr_bounce_types];
                                                   >> 150 };
                                                   >> 151 
                                                   >> 152 struct lock_class_stats lock_stats(struct lock_class *class);
                                                   >> 153 void clear_lock_stats(struct lock_class *class);
                                                   >> 154 #endif
                                                   >> 155 
                                                   >> 156 /*
                                                   >> 157  * Map the lock object (the lock instance) to the lock-class object.
                                                   >> 158  * This is embedded into specific lock instances:
                                                   >> 159  */
                                                   >> 160 struct lockdep_map {
                                                   >> 161         struct lock_class_key           *key;
                                                   >> 162         struct lock_class               *class_cache[NR_LOCKDEP_CACHING_CLASSES];
                                                   >> 163         const char                      *name;
                                                   >> 164 #ifdef CONFIG_LOCK_STAT
                                                   >> 165         int                             cpu;
                                                   >> 166         unsigned long                   ip;
                                                   >> 167 #endif
                                                   >> 168 };
                                                   >> 169 
 26 static inline void lockdep_copy_map(struct loc    170 static inline void lockdep_copy_map(struct lockdep_map *to,
 27                                     struct loc    171                                     struct lockdep_map *from)
 28 {                                                 172 {
 29         int i;                                    173         int i;
 30                                                   174 
 31         *to = *from;                              175         *to = *from;
 32         /*                                        176         /*
 33          * Since the class cache can be modifi    177          * Since the class cache can be modified concurrently we could observe
 34          * half pointers (64bit arch using 32b    178          * half pointers (64bit arch using 32bit copy insns). Therefore clear
 35          * the caches and take the performance    179          * the caches and take the performance hit.
 36          *                                        180          *
 37          * XXX it doesn't work well with lockd    181          * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
 38          *     that relies on cache abuse.        182          *     that relies on cache abuse.
 39          */                                       183          */
 40         for (i = 0; i < NR_LOCKDEP_CACHING_CLA    184         for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
 41                 to->class_cache[i] = NULL;        185                 to->class_cache[i] = NULL;
 42 }                                                 186 }
 43                                                   187 
 44 /*                                                188 /*
 45  * Every lock has a list of other locks that w    189  * Every lock has a list of other locks that were taken after it.
 46  * We only grow the list, never remove from it    190  * We only grow the list, never remove from it:
 47  */                                               191  */
 48 struct lock_list {                                192 struct lock_list {
 49         struct list_head                entry;    193         struct list_head                entry;
 50         struct lock_class               *class    194         struct lock_class               *class;
 51         struct lock_class               *links    195         struct lock_class               *links_to;
 52         const struct lock_trace         *trace !! 196         struct lock_trace               trace;
 53         u16                             distan !! 197         int                             distance;
 54         /* bitmap of different dependencies fr << 
 55         u8                              dep;   << 
 56         /* used by BFS to record whether "prev << 
 57         u8                              only_x << 
 58                                                   198 
 59         /*                                        199         /*
 60          * The parent field is used to impleme    200          * The parent field is used to implement breadth-first search, and the
 61          * bit 0 is reused to indicate if the     201          * bit 0 is reused to indicate if the lock has been accessed in BFS.
 62          */                                       202          */
 63         struct lock_list                *paren    203         struct lock_list                *parent;
 64 };                                                204 };
 65                                                   205 
 66 /**                                            !! 206 /*
 67  * struct lock_chain - lock dependency chain r !! 207  * We record lock dependency chains, so that we can cache them:
 68  *                                             << 
 69  * @irq_context: the same as irq_context in he << 
 70  * @depth:       the number of held locks in t << 
 71  * @base:        the index in chain_hlocks for << 
 72  * @entry:       the collided lock chains in l << 
 73  * @chain_key:   the hash key of this lock_cha << 
 74  */                                               208  */
 75 struct lock_chain {                               209 struct lock_chain {
 76         /* see BUILD_BUG_ON()s in add_chain_ca !! 210         /* see BUILD_BUG_ON()s in lookup_chain_cache() */
 77         unsigned int                    irq_co    211         unsigned int                    irq_context :  2,
 78                                         depth     212                                         depth       :  6,
 79                                         base      213                                         base        : 24;
 80         /* 4 byte hole */                         214         /* 4 byte hole */
 81         struct hlist_node               entry;    215         struct hlist_node               entry;
 82         u64                             chain_    216         u64                             chain_key;
 83 };                                                217 };
 84                                                   218 
                                                   >> 219 #define MAX_LOCKDEP_KEYS_BITS           13
                                                   >> 220 /*
                                                   >> 221  * Subtract one because we offset hlock->class_idx by 1 in order
                                                   >> 222  * to make 0 mean no class. This avoids overflowing the class_idx
                                                   >> 223  * bitfield and hitting the BUG in hlock_class().
                                                   >> 224  */
                                                   >> 225 #define MAX_LOCKDEP_KEYS                ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
                                                   >> 226 
                                                   >> 227 struct held_lock {
                                                   >> 228         /*
                                                   >> 229          * One-way hash of the dependency chain up to this point. We
                                                   >> 230          * hash the hashes step by step as the dependency chain grows.
                                                   >> 231          *
                                                   >> 232          * We use it for dependency-caching and we skip detection
                                                   >> 233          * passes and dependency-updates if there is a cache-hit, so
                                                   >> 234          * it is absolutely critical for 100% coverage of the validator
                                                   >> 235          * to have a unique key value for every unique dependency path
                                                   >> 236          * that can occur in the system, to make a unique hash value
                                                   >> 237          * as likely as possible - hence the 64-bit width.
                                                   >> 238          *
                                                   >> 239          * The task struct holds the current hash value (initialized
                                                   >> 240          * with zero), here we store the previous hash value:
                                                   >> 241          */
                                                   >> 242         u64                             prev_chain_key;
                                                   >> 243         unsigned long                   acquire_ip;
                                                   >> 244         struct lockdep_map              *instance;
                                                   >> 245         struct lockdep_map              *nest_lock;
                                                   >> 246 #ifdef CONFIG_LOCK_STAT
                                                   >> 247         u64                             waittime_stamp;
                                                   >> 248         u64                             holdtime_stamp;
                                                   >> 249 #endif
                                                   >> 250         unsigned int                    class_idx:MAX_LOCKDEP_KEYS_BITS;
                                                   >> 251         /*
                                                   >> 252          * The lock-stack is unified in that the lock chains of interrupt
                                                   >> 253          * contexts nest on top of process context chains, but we 'separate'
                                                   >> 254          * the hashes by starting with 0 if we cross into an interrupt
                                                   >> 255          * context, and we also do not add cross-context lock
                                                   >> 256          * dependencies - the lock usage graph walking covers that area
                                                   >> 257          * anyway, and we'd just unnecessarily increase the number of
                                                   >> 258          * dependencies otherwise. [Note: hardirq and softirq contexts
                                                   >> 259          * are separated from each other too.]
                                                   >> 260          *
                                                   >> 261          * The following field is used to detect when we cross into an
                                                   >> 262          * interrupt context:
                                                   >> 263          */
                                                   >> 264         unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
                                                   >> 265         unsigned int trylock:1;                                         /* 16 bits */
                                                   >> 266 
                                                   >> 267         unsigned int read:2;        /* see lock_acquire() comment */
                                                   >> 268         unsigned int check:1;       /* see lock_acquire() comment */
                                                   >> 269         unsigned int hardirqs_off:1;
                                                   >> 270         unsigned int references:12;                                     /* 32 bits */
                                                   >> 271         unsigned int pin_count;
                                                   >> 272 };
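
The chain-key comment above describes hashing the hashes step by step. A minimal sketch of that folding idea (an illustration only, not the kernel's actual mix function):

/* Fold one more held lock into the running 64-bit chain key. Every
 * distinct acquisition sequence then yields a distinct key with high
 * probability, which is what makes the cached chains safe to trust. */
static u64 chain_key_step(u64 prev_key, u16 class_idx)
{
        u64 k = prev_key ^ class_idx;

        k *= 0x9E3779B97F4A7C15ULL;     /* any good 64-bit mixer works here */
        return (k >> 32) | (k << 32);
}
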
                                                   >> 273 
 85 /*                                                274 /*
 86  * Initialization, self-test and debugging-out    275  * Initialization, self-test and debugging-output methods:
 87  */                                               276  */
 88 extern void lockdep_init(void);                   277 extern void lockdep_init(void);
 89 extern void lockdep_reset(void);                  278 extern void lockdep_reset(void);
 90 extern void lockdep_reset_lock(struct lockdep_    279 extern void lockdep_reset_lock(struct lockdep_map *lock);
 91 extern void lockdep_free_key_range(void *start    280 extern void lockdep_free_key_range(void *start, unsigned long size);
 92 extern asmlinkage void lockdep_sys_exit(void);    281 extern asmlinkage void lockdep_sys_exit(void);
 93 extern void lockdep_set_selftest_task(struct t    282 extern void lockdep_set_selftest_task(struct task_struct *task);
 94                                                   283 
 95 extern void lockdep_init_task(struct task_stru !! 284 extern void lockdep_off(void);
 96                                                !! 285 extern void lockdep_on(void);
 97 /*                                             << 
 98  * Split the recursion counter in two to readi << 
 99  */                                            << 
100 #define LOCKDEP_RECURSION_BITS  16             << 
101 #define LOCKDEP_OFF             (1U << LOCKDEP << 
102 #define LOCKDEP_RECURSION_MASK  (LOCKDEP_OFF - << 
103                                                << 
104 /*                                             << 
105  * lockdep_{off,on}() are macros to avoid trac << 
106  * to header dependencies.                     << 
107  */                                            << 
108                                                << 
109 #define lockdep_off()                          << 
110 do {                                           << 
111         current->lockdep_recursion += LOCKDEP_ << 
112 } while (0)                                    << 
113                                                << 
114 #define lockdep_on()                           << 
115 do {                                           << 
116         current->lockdep_recursion -= LOCKDEP_ << 
117 } while (0)                                    << 
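
Since LOCKDEP_OFF is 1 << 16, the upper 16 bits of current->lockdep_recursion count lockdep_off() nesting while the low 16 bits remain available for lockdep's internal recursion tracking. Use is strictly paired; a minimal sketch (the surrounding function is hypothetical):

static void my_untracked_section(void)
{
        lockdep_off();          /* lock events from this task are now ignored */
        /* ... acquire locks that lockdep must not track or validate ... */
        lockdep_on();           /* must pair exactly with lockdep_off() */
}
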
118                                                   286 
119 extern void lockdep_register_key(struct lock_c    287 extern void lockdep_register_key(struct lock_class_key *key);
120 extern void lockdep_unregister_key(struct lock    288 extern void lockdep_unregister_key(struct lock_class_key *key);
121                                                   289 
122 /*                                                290 /*
123  * These methods are used by specific locking     291  * These methods are used by specific locking variants (spinlocks,
124  * rwlocks, mutexes and rwsems) to pass init/a    292  * rwlocks, mutexes and rwsems) to pass init/acquire/release events
125  * to lockdep:                                    293  * to lockdep:
126  */                                               294  */
127                                                   295 
128 extern void lockdep_init_map_type(struct lockd !! 296 extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
129         struct lock_class_key *key, int subcla !! 297                              struct lock_class_key *key, int subclass);
130                                                << 
131 static inline void                             << 
132 lockdep_init_map_waits(struct lockdep_map *loc << 
133                        struct lock_class_key * << 
134 {                                              << 
135         lockdep_init_map_type(lock, name, key, << 
136 }                                              << 
137                                                << 
138 static inline void                             << 
139 lockdep_init_map_wait(struct lockdep_map *lock << 
140                       struct lock_class_key *k << 
141 {                                              << 
142         lockdep_init_map_waits(lock, name, key << 
143 }                                              << 
144                                                << 
145 static inline void lockdep_init_map(struct loc << 
146                              struct lock_class << 
147 {                                              << 
148         lockdep_init_map_wait(lock, name, key, << 
149 }                                              << 
150                                                   298 
151 /*                                                299 /*
152  * Reinitialize a lock key - for cases where t    300  * Reinitialize a lock key - for cases where there is special locking or
153  * special initialization of locks so that the    301  * special initialization of locks so that the validator gets the scope
154  * of dependencies wrong: they are either too     302  * of dependencies wrong: they are either too broad (they need a class-split)
155  * or they are too narrow (they suffer from a     303  * or they are too narrow (they suffer from a false class-split):
156  */                                               304  */
157 #define lockdep_set_class(lock, key)           !! 305 #define lockdep_set_class(lock, key) \
158         lockdep_init_map_type(&(lock)->dep_map !! 306                 lockdep_init_map(&(lock)->dep_map, #key, key, 0)
159                               (lock)->dep_map. !! 307 #define lockdep_set_class_and_name(lock, key, name) \
160                               (lock)->dep_map. !! 308                 lockdep_init_map(&(lock)->dep_map, name, key, 0)
161                               (lock)->dep_map. !! 309 #define lockdep_set_class_and_subclass(lock, key, sub) \
162                                                !! 310                 lockdep_init_map(&(lock)->dep_map, #key, key, sub)
163 #define lockdep_set_class_and_name(lock, key,  !! 311 #define lockdep_set_subclass(lock, sub) \
164         lockdep_init_map_type(&(lock)->dep_map !! 312                 lockdep_init_map(&(lock)->dep_map, #lock, \
165                               (lock)->dep_map. !! 313                                  (lock)->dep_map.key, sub)
166                               (lock)->dep_map. !! 314 
167                               (lock)->dep_map. << 
168                                                << 
169 #define lockdep_set_class_and_subclass(lock, k << 
170         lockdep_init_map_type(&(lock)->dep_map << 
171                               (lock)->dep_map. << 
172                               (lock)->dep_map. << 
173                               (lock)->dep_map. << 
174                                                << 
175 #define lockdep_set_subclass(lock, sub)        << 
176         lockdep_init_map_type(&(lock)->dep_map << 
177                               (lock)->dep_map. << 
178                               (lock)->dep_map. << 
179                               (lock)->dep_map. << 
180                                                << 
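
A typical class-split with these macros looks like the sketch below: two kinds of otherwise identical objects get distinct static keys, so the validator stops conflating their dependencies (struct my_node and my_node_init() are hypothetical):

struct my_node {
        spinlock_t lock;
        /* ... */
};

static struct lock_class_key my_leaf_key;       /* static keys: one per class */
static struct lock_class_key my_inner_key;

static void my_node_init(struct my_node *n, bool leaf)
{
        spin_lock_init(&n->lock);               /* default class from init site */
        lockdep_set_class(&n->lock, leaf ? &my_leaf_key : &my_inner_key);
}
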
181 /**                                            << 
182  * lockdep_set_novalidate_class: disable check << 
183  * lock                                        << 
184  * @lock: Lock to mark                         << 
185  *                                             << 
186  * Lockdep will still record that this lock ha << 
187  * instances when dumping locks                << 
188  */                                            << 
189 #define lockdep_set_novalidate_class(lock) \      315 #define lockdep_set_novalidate_class(lock) \
190         lockdep_set_class_and_name(lock, &__lo    316         lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
191                                                << 
192 /**                                            << 
193  * lockdep_set_notrack_class: disable lockdep  << 
194  * @lock: Lock to mark                         << 
195  *                                             << 
196  * Bigger hammer than lockdep_set_novalidate_c << 
197  * which takes more locks than lockdep is able << 
198  */                                            << 
199 #define lockdep_set_notrack_class(lock) \      << 
200         lockdep_set_class_and_name(lock, &__lo << 
201                                                << 
202 /*                                                317 /*
203  * Compare locking classes                        318  * Compare locking classes
204  */                                               319  */
205 #define lockdep_match_class(lock, key) lockdep    320 #define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)
206                                                   321 
207 static inline int lockdep_match_key(struct loc    322 static inline int lockdep_match_key(struct lockdep_map *lock,
208                                     struct loc    323                                     struct lock_class_key *key)
209 {                                                 324 {
210         return lock->key == key;                  325         return lock->key == key;
211 }                                                 326 }
212                                                   327 
213 /*                                                328 /*
214  * Acquire a lock.                                329  * Acquire a lock.
215  *                                                330  *
216  * Values for "read":                             331  * Values for "read":
217  *                                                332  *
218  *   0: exclusive (write) acquire                 333  *   0: exclusive (write) acquire
219  *   1: read-acquire (no recursion allowed)       334  *   1: read-acquire (no recursion allowed)
220  *   2: read-acquire with same-instance recurs    335  *   2: read-acquire with same-instance recursion allowed
221  *                                                336  *
222  * Values for check:                              337  * Values for check:
223  *                                                338  *
224  *   0: simple checks (freeing, held-at-exit-t    339  *   0: simple checks (freeing, held-at-exit-time, etc.)
225  *   1: full validation                           340  *   1: full validation
226  */                                               341  */
227 extern void lock_acquire(struct lockdep_map *l    342 extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
228                          int trylock, int read    343                          int trylock, int read, int check,
229                          struct lockdep_map *n    344                          struct lockdep_map *nest_lock, unsigned long ip);
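
A sketch of what those read/check values look like at the call site (example_acquires() is hypothetical; real primitives go through wrapper macros such as lock_acquire_exclusive() defined further down in this header):

static void example_acquires(struct lockdep_map *map)
{
        lock_acquire(map, 0, 0, 0, 1, NULL, _THIS_IP_); /* exclusive (write)      */
        lock_release(map, _THIS_IP_);
        lock_acquire(map, 0, 0, 1, 1, NULL, _THIS_IP_); /* read, rwsem-style      */
        lock_release(map, _THIS_IP_);
        lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_); /* recursive read, rwlock */
        lock_release(map, _THIS_IP_);
}
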
230                                                   345 
231 extern void lock_release(struct lockdep_map *l !! 346 extern void lock_release(struct lockdep_map *lock, int nested,
232                                                !! 347                          unsigned long ip);
233 extern void lock_sync(struct lockdep_map *lock << 
234                       int read, int check, str << 
235                       unsigned long ip);       << 
236                                                << 
237 /* lock_is_held_type() returns */              << 
238 #define LOCK_STATE_UNKNOWN      -1             << 
239 #define LOCK_STATE_NOT_HELD     0              << 
240 #define LOCK_STATE_HELD         1              << 
241                                                   348 
242 /*                                                349 /*
243  * Same "read" as for lock_acquire(), except -    350  * Same "read" as for lock_acquire(), except -1 means any.
244  */                                               351  */
245 extern int lock_is_held_type(const struct lock    352 extern int lock_is_held_type(const struct lockdep_map *lock, int read);
246                                                   353 
247 static inline int lock_is_held(const struct lo    354 static inline int lock_is_held(const struct lockdep_map *lock)
248 {                                                 355 {
249         return lock_is_held_type(lock, -1);       356         return lock_is_held_type(lock, -1);
250 }                                                 357 }
251                                                   358 
252 #define lockdep_is_held(lock)           lock_i    359 #define lockdep_is_held(lock)           lock_is_held(&(lock)->dep_map)
253 #define lockdep_is_held_type(lock, r)   lock_i    360 #define lockdep_is_held_type(lock, r)   lock_is_held_type(&(lock)->dep_map, (r))
254                                                   361 
255 extern void lock_set_class(struct lockdep_map     362 extern void lock_set_class(struct lockdep_map *lock, const char *name,
256                            struct lock_class_k    363                            struct lock_class_key *key, unsigned int subclass,
257                            unsigned long ip);     364                            unsigned long ip);
258                                                   365 
259 #define lock_set_novalidate_class(l, n, i) \   << 
260         lock_set_class(l, n, &__lockdep_no_val << 
261                                                << 
262 static inline void lock_set_subclass(struct lo    366 static inline void lock_set_subclass(struct lockdep_map *lock,
263                 unsigned int subclass, unsigne    367                 unsigned int subclass, unsigned long ip)
264 {                                                 368 {
265         lock_set_class(lock, lock->name, lock-    369         lock_set_class(lock, lock->name, lock->key, subclass, ip);
266 }                                                 370 }
267                                                   371 
268 extern void lock_downgrade(struct lockdep_map     372 extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
269                                                   373 
                                                   >> 374 struct pin_cookie { unsigned int val; };
                                                   >> 375 
270 #define NIL_COOKIE (struct pin_cookie){ .val =    376 #define NIL_COOKIE (struct pin_cookie){ .val = 0U, }
271                                                   377 
272 extern struct pin_cookie lock_pin_lock(struct     378 extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
273 extern void lock_repin_lock(struct lockdep_map    379 extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
274 extern void lock_unpin_lock(struct lockdep_map    380 extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
275                                                   381 
276 #define lockdep_depth(tsk)      (debug_locks ?    382 #define lockdep_depth(tsk)      (debug_locks ? (tsk)->lockdep_depth : 0)
277                                                   383 
278 #define lockdep_assert(cond)            \      !! 384 #define lockdep_assert_held(l)  do {                            \
279         do { WARN_ON(debug_locks && !(cond));  !! 385                 WARN_ON(debug_locks && !lockdep_is_held(l));    \
280                                                !! 386         } while (0)
281 #define lockdep_assert_once(cond)       \      !! 387 
282         do { WARN_ON_ONCE(debug_locks && !(con !! 388 #define lockdep_assert_held_exclusive(l)        do {                    \
283                                                !! 389                 WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));    \
284 #define lockdep_assert_held(l)          \      !! 390         } while (0)
285         lockdep_assert(lockdep_is_held(l) != L !! 391 
286                                                !! 392 #define lockdep_assert_held_read(l)     do {                            \
287 #define lockdep_assert_not_held(l)      \      !! 393                 WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));    \
288         lockdep_assert(lockdep_is_held(l) != L !! 394         } while (0)
289                                                !! 395 
290 #define lockdep_assert_held_write(l)    \      !! 396 #define lockdep_assert_held_once(l)     do {                            \
291         lockdep_assert(lockdep_is_held_type(l, !! 397                 WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));       \
292                                                !! 398         } while (0)
293 #define lockdep_assert_held_read(l)     \      << 
294         lockdep_assert(lockdep_is_held_type(l, << 
295                                                << 
296 #define lockdep_assert_held_once(l)            << 
297         lockdep_assert_once(lockdep_is_held(l) << 
298                                                << 
299 #define lockdep_assert_none_held_once()        << 
300         lockdep_assert_once(!current->lockdep_ << 
301                                                   399 
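
These assertions document a locking contract and enforce it at runtime in one line; a minimal sketch (struct my_dev is hypothetical, the pattern is the standard one):

struct my_dev {
        spinlock_t lock;
        /* ... state protected by lock ... */
};

static void my_dev_kick(struct my_dev *dev)
{
        lockdep_assert_held(&dev->lock);        /* caller must hold dev->lock */
        /* ... safe to touch dev state here ... */
}
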
302 #define lockdep_recursing(tsk)  ((tsk)->lockde    400 #define lockdep_recursing(tsk)  ((tsk)->lockdep_recursion)
303                                                   401 
304 #define lockdep_pin_lock(l)     lock_pin_lock(    402 #define lockdep_pin_lock(l)     lock_pin_lock(&(l)->dep_map)
305 #define lockdep_repin_lock(l,c) lock_repin_loc    403 #define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c))
306 #define lockdep_unpin_lock(l,c) lock_unpin_loc    404 #define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c))
307                                                   405 
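
Pinning guards sections that must not drop and retake the lock behind the holder's back; the scheduler pins the runqueue lock this way. A sketch over a raw_spinlock_t (which embeds a dep_map under lockdep):

static void my_pinned_work(raw_spinlock_t *lock)
{
        struct pin_cookie cookie;

        raw_spin_lock(lock);
        cookie = lockdep_pin_lock(lock);        /* complain if lock is released */
        /* ... work that relies on the lock staying held throughout ... */
        lockdep_unpin_lock(lock, cookie);       /* ... before this matching unpin */
        raw_spin_unlock(lock);
}
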
308 /*                                             << 
 309  * Must use lock_map_acquire_try() with overri <<
310  * lockdep thinking they participate in the bl << 
311  */                                            << 
312 #define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_ << 
313         struct lockdep_map _name = {           << 
314                 .name = #_name "-wait-type-ove << 
315                 .wait_type_inner = _wait_type, << 
316                 .lock_type = LD_LOCK_WAIT_OVER << 
317                                                << 
318 #else /* !CONFIG_LOCKDEP */                       406 #else /* !CONFIG_LOCKDEP */
319                                                   407 
320 static inline void lockdep_init_task(struct ta << 
321 {                                              << 
322 }                                              << 
323                                                << 
324 static inline void lockdep_off(void)              408 static inline void lockdep_off(void)
325 {                                                 409 {
326 }                                                 410 }
327                                                   411 
328 static inline void lockdep_on(void)               412 static inline void lockdep_on(void)
329 {                                                 413 {
330 }                                                 414 }
331                                                   415 
332 static inline void lockdep_set_selftest_task(s    416 static inline void lockdep_set_selftest_task(struct task_struct *task)
333 {                                                 417 {
334 }                                                 418 }
335                                                   419 
336 # define lock_acquire(l, s, t, r, c, n, i)        420 # define lock_acquire(l, s, t, r, c, n, i)      do { } while (0)
337 # define lock_release(l, i)                    !! 421 # define lock_release(l, n, i)                  do { } while (0)
338 # define lock_downgrade(l, i)                     422 # define lock_downgrade(l, i)                   do { } while (0)
339 # define lock_set_class(l, n, key, s, i)       !! 423 # define lock_set_class(l, n, k, s, i)          do { } while (0)
340 # define lock_set_novalidate_class(l, n, i)    << 
341 # define lock_set_subclass(l, s, i)               424 # define lock_set_subclass(l, s, i)             do { } while (0)
342 # define lockdep_init()                           425 # define lockdep_init()                         do { } while (0)
343 # define lockdep_init_map_type(lock, name, key << 
344                 do { (void)(name); (void)(key) << 
345 # define lockdep_init_map_waits(lock, name, ke << 
346                 do { (void)(name); (void)(key) << 
347 # define lockdep_init_map_wait(lock, name, key << 
348                 do { (void)(name); (void)(key) << 
349 # define lockdep_init_map(lock, name, key, sub    426 # define lockdep_init_map(lock, name, key, sub) \
350                 do { (void)(name); (void)(key)    427                 do { (void)(name); (void)(key); } while (0)
351 # define lockdep_set_class(lock, key)             428 # define lockdep_set_class(lock, key)           do { (void)(key); } while (0)
352 # define lockdep_set_class_and_name(lock, key,    429 # define lockdep_set_class_and_name(lock, key, name) \
353                 do { (void)(key); (void)(name)    430                 do { (void)(key); (void)(name); } while (0)
354 #define lockdep_set_class_and_subclass(lock, k    431 #define lockdep_set_class_and_subclass(lock, key, sub) \
355                 do { (void)(key); } while (0)     432                 do { (void)(key); } while (0)
356 #define lockdep_set_subclass(lock, sub)           433 #define lockdep_set_subclass(lock, sub)         do { } while (0)
357                                                   434 
358 #define lockdep_set_novalidate_class(lock) do     435 #define lockdep_set_novalidate_class(lock) do { } while (0)
359 #define lockdep_set_notrack_class(lock) do { } << 
360                                                   436 
361 /*                                                437 /*
362  * We don't define lockdep_match_class() and l    438  * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
363  * case since the result is not well defined a    439  * case since the result is not well defined and the caller should rather
364  * #ifdef the call himself.                       440  * #ifdef the call himself.
365  */                                               441  */
366                                                   442 
367 # define lockdep_reset()                do { d    443 # define lockdep_reset()                do { debug_locks = 1; } while (0)
368 # define lockdep_free_key_range(start, size)      444 # define lockdep_free_key_range(start, size)    do { } while (0)
369 # define lockdep_sys_exit()                       445 # define lockdep_sys_exit()                     do { } while (0)
                                                   >> 446 /*
                                                   >> 447  * The class key takes no space if lockdep is disabled:
                                                   >> 448  */
                                                   >> 449 struct lock_class_key { };
370                                                   450 
371 static inline void lockdep_register_key(struct    451 static inline void lockdep_register_key(struct lock_class_key *key)
372 {                                                 452 {
373 }                                                 453 }
374                                                   454 
375 static inline void lockdep_unregister_key(stru    455 static inline void lockdep_unregister_key(struct lock_class_key *key)
376 {                                                 456 {
377 }                                                 457 }
378                                                   458 
379 #define lockdep_depth(tsk)      (0)            << 
380                                                << 
381 /*                                                459 /*
382  * Dummy forward declarations, allow users to  !! 460  * The lockdep_map takes no space if lockdep is disabled:
383  * and depend on dead code elimination.        << 
384  */                                               461  */
385 extern int lock_is_held(const void *);         !! 462 struct lockdep_map { };
386 extern int lockdep_is_held(const void *);      !! 463 
387 #define lockdep_is_held_type(l, r)             !! 464 #define lockdep_depth(tsk)      (0)
388                                                   465 
389 #define lockdep_assert(c)                      !! 466 #define lockdep_is_held_type(l, r)              (1)
390 #define lockdep_assert_once(c)                 << 
391                                                   467 
392 #define lockdep_assert_held(l)                    468 #define lockdep_assert_held(l)                  do { (void)(l); } while (0)
393 #define lockdep_assert_not_held(l)             !! 469 #define lockdep_assert_held_exclusive(l)        do { (void)(l); } while (0)
394 #define lockdep_assert_held_write(l)           << 
395 #define lockdep_assert_held_read(l)               470 #define lockdep_assert_held_read(l)             do { (void)(l); } while (0)
396 #define lockdep_assert_held_once(l)               471 #define lockdep_assert_held_once(l)             do { (void)(l); } while (0)
397 #define lockdep_assert_none_held_once() do { } << 
398                                                   472 
399 #define lockdep_recursing(tsk)                    473 #define lockdep_recursing(tsk)                  (0)
400                                                   474 
                                                   >> 475 struct pin_cookie { };
                                                   >> 476 
401 #define NIL_COOKIE (struct pin_cookie){ }         477 #define NIL_COOKIE (struct pin_cookie){ }
402                                                   478 
403 #define lockdep_pin_lock(l)                       479 #define lockdep_pin_lock(l)                     ({ struct pin_cookie cookie = { }; cookie; })
404 #define lockdep_repin_lock(l, c)                  480 #define lockdep_repin_lock(l, c)                do { (void)(l); (void)(c); } while (0)
405 #define lockdep_unpin_lock(l, c)                  481 #define lockdep_unpin_lock(l, c)                do { (void)(l); (void)(c); } while (0)
406                                                   482 
407 #define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_ << 
408         struct lockdep_map __maybe_unused _nam << 
409                                                << 
410 #endif /* !LOCKDEP */                             483 #endif /* !LOCKDEP */
411                                                   484 
412 #ifdef CONFIG_PROVE_LOCKING                    << 
413 void lockdep_set_lock_cmp_fn(struct lockdep_ma << 
414                                                << 
415 #define lock_set_cmp_fn(lock, ...)      lockde << 
416 #else                                          << 
417 #define lock_set_cmp_fn(lock, ...)      do { } << 
418 #endif                                         << 
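
A sketch of a comparator, assuming the lock_cmp_fn/lock_print_fn typedefs from lockdep_types.h (my_node_lock_cmp() and the print hook are hypothetical). A cmp_fn lets lockdep accept a defined order, such as address order, between two held locks of the same class instead of reporting a recursive deadlock:

static int my_node_lock_cmp(const struct lockdep_map *a,
                            const struct lockdep_map *b)
{
        /* dep_map is embedded in the lock, so map order == lock order */
        return a < b ? -1 : (a > b ? 1 : 0);
}

static void my_node_lock_print(const struct lockdep_map *map)
{
        /* optional: extra context printed on a lockdep report */
}

/* at init time, e.g. after spin_lock_init(&node->lock):            */
/*      lock_set_cmp_fn(&node->lock, my_node_lock_cmp, my_node_lock_print); */
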
419                                                << 
420 enum xhlock_context_t {                           485 enum xhlock_context_t {
421         XHLOCK_HARD,                              486         XHLOCK_HARD,
422         XHLOCK_SOFT,                              487         XHLOCK_SOFT,
423         XHLOCK_CTX_NR,                            488         XHLOCK_CTX_NR,
424 };                                                489 };
425                                                   490 
                                                   >> 491 #define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
426 /*                                                492 /*
427  * To initialize a lockdep_map statically use     493  * To initialize a lockdep_map statically use this macro.
428  * Note that _name must not be NULL.              494  * Note that _name must not be NULL.
429  */                                               495  */
430 #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \    496 #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
431         { .name = (_name), .key = (void *)(_ke    497         { .name = (_name), .key = (void *)(_key), }
432                                                   498 
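
A common use of STATIC_LOCKDEP_MAP_INIT() is a static "virtual lock" that exists purely as a lockdep annotation, ordering a code region against real locks (workqueues use this pattern). The names below are hypothetical and assume CONFIG_LOCKDEP; lock_map_acquire()/lock_map_release() are helpers defined elsewhere in this header:

static struct lock_class_key my_region_key;
static struct lockdep_map my_region_map =
        STATIC_LOCKDEP_MAP_INIT("my_region", &my_region_key);

static void my_region_enter(void)
{
        lock_map_acquire(&my_region_map);       /* orders the region against   */
        /* ... every lock taken in here ... */  /* locks acquired inside it    */
        lock_map_release(&my_region_map);
}
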
433 static inline void lockdep_invariant_state(boo    499 static inline void lockdep_invariant_state(bool force) {}
                                                   >> 500 static inline void lockdep_init_task(struct task_struct *task) {}
434 static inline void lockdep_free_task(struct ta    501 static inline void lockdep_free_task(struct task_struct *task) {}
435                                                   502 
436 #ifdef CONFIG_LOCK_STAT                           503 #ifdef CONFIG_LOCK_STAT
437                                                   504 
438 extern void lock_contended(struct lockdep_map     505 extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
439 extern void lock_acquired(struct lockdep_map *    506 extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
440                                                   507 
441 #define LOCK_CONTENDED(_lock, try, lock)          508 #define LOCK_CONTENDED(_lock, try, lock)                        \
442 do {                                              509 do {                                                            \
443         if (!try(_lock)) {                        510         if (!try(_lock)) {                                      \
444                 lock_contended(&(_lock)->dep_m    511                 lock_contended(&(_lock)->dep_map, _RET_IP_);    \
445                 lock(_lock);                      512                 lock(_lock);                                    \
446         }                                         513         }                                                       \
447         lock_acquired(&(_lock)->dep_map, _RET_    514         lock_acquired(&(_lock)->dep_map, _RET_IP_);                     \
448 } while (0)                                       515 } while (0)
449                                                   516 
450 #define LOCK_CONTENDED_RETURN(_lock, try, lock    517 #define LOCK_CONTENDED_RETURN(_lock, try, lock)                 \
451 ({                                                518 ({                                                              \
452         int ____err = 0;                          519         int ____err = 0;                                        \
453         if (!try(_lock)) {                        520         if (!try(_lock)) {                                      \
454                 lock_contended(&(_lock)->dep_m    521                 lock_contended(&(_lock)->dep_map, _RET_IP_);    \
455                 ____err = lock(_lock);            522                 ____err = lock(_lock);                          \
456         }                                         523         }                                                       \
457         if (!____err)                             524         if (!____err)                                           \
458                 lock_acquired(&(_lock)->dep_ma    525                 lock_acquired(&(_lock)->dep_map, _RET_IP_);     \
459         ____err;                                  526         ____err;                                                \
460 })                                                527 })
461                                                   528 
462 #else /* CONFIG_LOCK_STAT */                      529 #else /* CONFIG_LOCK_STAT */
463                                                   530 
464 #define lock_contended(lockdep_map, ip) do {}     531 #define lock_contended(lockdep_map, ip) do {} while (0)
465 #define lock_acquired(lockdep_map, ip) do {} w    532 #define lock_acquired(lockdep_map, ip) do {} while (0)
466                                                   533 
467 #define LOCK_CONTENDED(_lock, try, lock) \        534 #define LOCK_CONTENDED(_lock, try, lock) \
468         lock(_lock)                               535         lock(_lock)
469                                                   536 
470 #define LOCK_CONTENDED_RETURN(_lock, try, lock    537 #define LOCK_CONTENDED_RETURN(_lock, try, lock) \
471         lock(_lock)                               538         lock(_lock)
472                                                   539 
473 #endif /* CONFIG_LOCK_STAT */                     540 #endif /* CONFIG_LOCK_STAT */
474                                                   541 
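A sketch of how a locking primitive typically wires LOCK_CONTENDED() up,
assuming a hypothetical struct my_mutex with an embedded dep_map plus
trylock/slow-path helpers; the 'try' callback must return non-zero on success
so lock_contended() only fires when the fast path fails:

    struct my_mutex {
            struct lockdep_map      dep_map;
            /* ... actual lock state ... */
    };

    static int do_mutex_trylock(struct my_mutex *lock);    /* non-zero on success */
    static void do_mutex_lock_slow(struct my_mutex *lock);

    static void my_mutex_lock(struct my_mutex *lock)
    {
            mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
            LOCK_CONTENDED(lock, do_mutex_trylock, do_mutex_lock_slow);
    }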
                                                   >> 542 #ifdef CONFIG_LOCKDEP
                                                   >> 543 
                                                   >> 544 /*
                                                   >> 545  * On lockdep we dont want the hand-coded irq-enable of
                                                   >> 546  * _raw_*_lock_flags() code, because lockdep assumes
                                                   >> 547  * that interrupts are not re-enabled during lock-acquire:
                                                   >> 548  */
                                                   >> 549 #define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
                                                   >> 550         LOCK_CONTENDED((_lock), (try), (lock))
                                                   >> 551 
                                                   >> 552 #else /* CONFIG_LOCKDEP */
                                                   >> 553 
                                                   >> 554 #define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
                                                   >> 555         lockfl((_lock), (flags))
                                                   >> 556 
                                                   >> 557 #endif /* CONFIG_LOCKDEP */
                                                   >> 558 
475 #ifdef CONFIG_PROVE_LOCKING                       559 #ifdef CONFIG_PROVE_LOCKING
476 extern void print_irqtrace_events(struct task_    560 extern void print_irqtrace_events(struct task_struct *curr);
477 #else                                             561 #else
478 static inline void print_irqtrace_events(struc    562 static inline void print_irqtrace_events(struct task_struct *curr)
479 {                                                 563 {
480 }                                                 564 }
481 #endif                                            565 #endif
482                                                   566 
483 /* Variable used to make lockdep treat read_lock() as recursive in selftests */  <<
484 #ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS      << 
485 extern unsigned int force_read_lock_recursive; << 
486 #else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */ << 
487 #define force_read_lock_recursive 0            << 
488 #endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */          <<
489                                                << 
490 #ifdef CONFIG_LOCKDEP                          << 
491 extern bool read_lock_is_recursive(void);      << 
492 #else /* CONFIG_LOCKDEP */                     << 
493 /* If !LOCKDEP, the value is meaningless */    << 
494 #define read_lock_is_recursive() 0             << 
495 #endif                                         << 
496                                                << 
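These hooks exist because lockdep models in-kernel read_lock() as a recursive
reader, and rwlock_acquire_read() further down picks the recursive or
non-recursive shared acquire accordingly. A sketch of the nesting this is
meant to accept (my_rwlock is hypothetical):

    static DEFINE_RWLOCK(my_rwlock);

    static void my_nested_reader(void)
    {
            read_lock(&my_rwlock);
            /* ... */
            read_lock(&my_rwlock);          /* recursive read, same context */
            read_unlock(&my_rwlock);
            read_unlock(&my_rwlock);
    }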
497 /*                                                567 /*
498  * For trivial one-depth nesting of a lock-cla    568  * For trivial one-depth nesting of a lock-class, the following
499  * global define can be used. (Subsystems with    569  * global define can be used. (Subsystems with multiple levels
500  * of nesting should define their own lock-nes    570  * of nesting should define their own lock-nesting subclasses.)
501  */                                               571  */
502 #define SINGLE_DEPTH_NESTING                      572 #define SINGLE_DEPTH_NESTING                    1
503                                                   573 
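A sketch of the intended use: two locks of one class nested one level deep,
made explicit with mutex_lock_nested() (struct my_node is hypothetical):

    struct my_node { struct mutex lock; };

    static void my_lock_pair(struct my_node *parent, struct my_node *child)
    {
            mutex_lock(&parent->lock);
            mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
            /* ... operate on both nodes ... */
            mutex_unlock(&child->lock);
            mutex_unlock(&parent->lock);
    }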
504 /*                                                574 /*
505  * Map the dependency ops to NOP or to real lo    575  * Map the dependency ops to NOP or to real lockdep ops, depending
506  * on the per lock-class debug mode:              576  * on the per lock-class debug mode:
507  */                                               577  */
508                                                   578 
509 #define lock_acquire_exclusive(l, s, t, n, i)     579 #define lock_acquire_exclusive(l, s, t, n, i)           lock_acquire(l, s, t, 0, 1, n, i)
510 #define lock_acquire_shared(l, s, t, n, i)        580 #define lock_acquire_shared(l, s, t, n, i)              lock_acquire(l, s, t, 1, 1, n, i)
511 #define lock_acquire_shared_recursive(l, s, t,    581 #define lock_acquire_shared_recursive(l, s, t, n, i)    lock_acquire(l, s, t, 2, 1, n, i)
512                                                   582 
513 #define spin_acquire(l, s, t, i)                  583 #define spin_acquire(l, s, t, i)                lock_acquire_exclusive(l, s, t, NULL, i)
514 #define spin_acquire_nest(l, s, t, n, i)          584 #define spin_acquire_nest(l, s, t, n, i)        lock_acquire_exclusive(l, s, t, n, i)
515 #define spin_release(l, i)              lock_release(l, i)      !! 585 #define spin_release(l, n, i)                   lock_release(l, n, i)
516                                                   586 
517 #define rwlock_acquire(l, s, t, i)                587 #define rwlock_acquire(l, s, t, i)              lock_acquire_exclusive(l, s, t, NULL, i)
518 #define rwlock_acquire_read(l, s, t, i)                 \       !! 588 #define rwlock_acquire_read(l, s, t, i)         lock_acquire_shared_recursive(l, s, t, NULL, i)
519 do {                                                    \       !! 589 #define rwlock_release(l, n, i)                 lock_release(l, n, i)
520         if (read_lock_is_recursive())                   \                <<
521                 lock_acquire_shared_recursive(l, s, t, NULL, i); \       <<
522         else                                            \                <<
523                 lock_acquire_shared(l, s, t, NULL, i);  \                <<
524 } while (0)                                    << 
525                                                << 
526 #define rwlock_release(l, i)            lock_release(l, i)                <<
527                                                   590 
528 #define seqcount_acquire(l, s, t, i)              591 #define seqcount_acquire(l, s, t, i)            lock_acquire_exclusive(l, s, t, NULL, i)
529 #define seqcount_acquire_read(l, s, t, i)         592 #define seqcount_acquire_read(l, s, t, i)       lock_acquire_shared_recursive(l, s, t, NULL, i)
530 #define seqcount_release(l, i)          lock_release(l, i)      !! 593 #define seqcount_release(l, n, i)               lock_release(l, n, i)
531                                                   594 
532 #define mutex_acquire(l, s, t, i)                 595 #define mutex_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
533 #define mutex_acquire_nest(l, s, t, n, i)         596 #define mutex_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
534 #define mutex_release(l, i)             lock_release(l, i)      !! 597 #define mutex_release(l, n, i)                  lock_release(l, n, i)
535                                                   598 
536 #define rwsem_acquire(l, s, t, i)                 599 #define rwsem_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
537 #define rwsem_acquire_nest(l, s, t, n, i)         600 #define rwsem_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
538 #define rwsem_acquire_read(l, s, t, i)            601 #define rwsem_acquire_read(l, s, t, i)          lock_acquire_shared(l, s, t, NULL, i)
539 #define rwsem_release(l, i)             lock_release(l, i)      !! 602 #define rwsem_release(l, n, i)                  lock_release(l, n, i)
540                                                   603 
541 #define lock_map_acquire(l)                       604 #define lock_map_acquire(l)                     lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
542 #define lock_map_acquire_try(l)         lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)  <<
543 #define lock_map_acquire_read(l)                  605 #define lock_map_acquire_read(l)                lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
544 #define lock_map_acquire_tryread(l)               606 #define lock_map_acquire_tryread(l)             lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
545 #define lock_map_release(l)             lock_release(l, _THIS_IP_)      !! 607 #define lock_map_release(l)                     lock_release(l, 1, _THIS_IP_)
546 #define lock_map_sync(l)                lock_sync(l, 0, 0, 1, NULL, _THIS_IP_)  <<
547                                                   608 
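A sketch of the classic use of the lock_map_* operations: annotating a
pseudo-lock such as "a callback is running", the way the workqueue code does,
so waiting for the callback while holding one of its locks is reported. All
names are hypothetical:

    static struct lock_class_key my_key;
    static struct lockdep_map my_map =
            STATIC_LOCKDEP_MAP_INIT("my_callback", &my_key);

    static void my_run_callback(void (*cb)(void))
    {
            lock_map_acquire(&my_map);
            cb();                   /* locks taken here now order against my_map */
            lock_map_release(&my_map);
    }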
548 #ifdef CONFIG_PROVE_LOCKING                       609 #ifdef CONFIG_PROVE_LOCKING
549 # define might_lock(lock)                               \       !! 610 # define might_lock(lock)                                               \
550 do {                                              611 do {                                                                    \
551         typecheck(struct lockdep_map *, &(lock    612         typecheck(struct lockdep_map *, &(lock)->dep_map);              \
552         lock_acquire(&(lock)->dep_map, 0, 0, 0    613         lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);    \
553         lock_release(&(lock)->dep_map, _THIS_IP_);      \       !! 614         lock_release(&(lock)->dep_map, 0, _THIS_IP_);                   \
554 } while (0)                                       615 } while (0)
555 # define might_lock_read(lock)                          \       !! 616 # define might_lock_read(lock)                                          \
556 do {                                              617 do {                                                                    \
557         typecheck(struct lockdep_map *, &(lock    618         typecheck(struct lockdep_map *, &(lock)->dep_map);              \
558         lock_acquire(&(lock)->dep_map, 0, 0, 1    619         lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);    \
559         lock_release(&(lock)->dep_map, _THIS_IP_);      \       !! 620         lock_release(&(lock)->dep_map, 0, _THIS_IP_);                   \
560 } while (0)                                    << 
561 # define might_lock_nested(lock, subclass)              \                <<
562 do {                                                    \                <<
563         typecheck(struct lockdep_map *, &(lock)->dep_map);      \       <<
564         lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL, \       <<
565                      _THIS_IP_);                        \                <<
566         lock_release(&(lock)->dep_map, _THIS_IP_);      \                <<
567 } while (0)                                    << 
568                                                << 
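might_lock() and friends let a function advertise a dependency it only takes
on some paths, so lockdep records it on every call. A sketch with hypothetical
names:

    struct my_pool { struct mutex lock; };

    static void *my_alloc(struct my_pool *pool)
    {
            void *p;

            might_lock(&pool->lock);        /* recorded even on the fast path */

            p = my_fast_alloc(pool);        /* lockless fast path */
            if (!p)
                    p = my_slow_alloc(pool);        /* takes pool->lock */
            return p;
    }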
569 DECLARE_PER_CPU(int, hardirqs_enabled);        << 
570 DECLARE_PER_CPU(int, hardirq_context);         << 
571 DECLARE_PER_CPU(unsigned int, lockdep_recursion);                <<
572                                                << 
573 #define __lockdep_enabled       (debug_locks && !this_cpu_read(lockdep_recursion))  <<
574                                                << 
575 #define lockdep_assert_irqs_enabled()                   \                <<
576 do {                                                    \                <<
577         WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \  <<
578 } while (0)                                    << 
579                                                << 
580 #define lockdep_assert_irqs_disabled()                  \                <<
581 do {                                                    \                <<
582         WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \  <<
583 } while (0)                                    << 
584                                                << 
585 #define lockdep_assert_in_irq()                         \                <<
586 do {                                                    \                <<
587         WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \  <<
588 } while (0)                                    << 
589                                                << 
590 #define lockdep_assert_no_hardirq()                     \                <<
591 do {                                                    \                <<
592         WARN_ON_ONCE(__lockdep_enabled && (this_cpu_read(hardirq_context) || \  <<
593                                            !this_cpu_read(hardirqs_enabled))); \  <<
594 } while (0)                                       621 } while (0)
595                                                   622 
596 #define lockdep_assert_preemption_enabled()             \       !! 623 #define lockdep_assert_irqs_enabled()   do {                            \
597 do {                                                    \       !! 624                 WARN_ONCE(debug_locks && !current->lockdep_recursion && \
598         WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)  &&      \  !! 625                           !current->hardirqs_enabled,                   \
599                      __lockdep_enabled                 &&      \  !! 626                           "IRQs not enabled as expected\n");            \
600                      (preempt_count() != 0             ||      \  !! 627         } while (0)
601                       !this_cpu_read(hardirqs_enabled)));      \         <<
602 } while (0)                                    << 
603                                                << 
604 #define lockdep_assert_preemption_disabled()            \                <<
605 do {                                                    \                <<
606         WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)  &&      \        <<
607                      __lockdep_enabled                 &&      \        <<
608                      (preempt_count() == 0             &&      \        <<
609                       this_cpu_read(hardirqs_enabled)));       \        <<
610 } while (0)                                    << 
611                                                << 
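These assertions document calling-context requirements and are free when
lockdep is off. A sketch with a hypothetical per-CPU counter:

    static DEFINE_PER_CPU(unsigned long, my_stat);

    static void my_stat_inc(void)
    {
            lockdep_assert_preemption_disabled();   /* callers must pin the CPU */
            __this_cpu_inc(my_stat);
    }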
612 /*                                             << 
613  * Acceptable for protecting per-CPU resources accessed from BH.         <<
614  * Much like in_softirq() - semantics are ambiguous, use carefully.      <<
615  */                                            << 
616 #define lockdep_assert_in_softirq()                     \                <<
617 do {                                                    \                <<
618         WARN_ON_ONCE(__lockdep_enabled                  &&      \       <<
619                      (!in_softirq() || in_irq() || in_nmi())); \        <<
620 } while (0)                                    << 
621                                                   628 
622 extern void lockdep_assert_in_softirq_func(void);       !! 629 #define lockdep_assert_irqs_disabled()  do {                            \
                                                   >> 630                 WARN_ONCE(debug_locks && !current->lockdep_recursion && \
                                                   >> 631                           current->hardirqs_enabled,                    \
                                                   >> 632                           "IRQs not disabled as expected\n");           \
                                                   >> 633         } while (0)
623                                                   634 
624 #else                                             635 #else
625 # define might_lock(lock) do { } while (0)        636 # define might_lock(lock) do { } while (0)
626 # define might_lock_read(lock) do { } while (0    637 # define might_lock_read(lock) do { } while (0)
627 # define might_lock_nested(lock, subclass) do { } while (0)              <<
628                                                << 
629 # define lockdep_assert_irqs_enabled() do { }     638 # define lockdep_assert_irqs_enabled() do { } while (0)
630 # define lockdep_assert_irqs_disabled() do { }    639 # define lockdep_assert_irqs_disabled() do { } while (0)
631 # define lockdep_assert_in_irq() do { } while (0)                        <<
632 # define lockdep_assert_no_hardirq() do { } while (0)                    <<
633                                                << 
634 # define lockdep_assert_preemption_enabled() do { } while (0)            <<
635 # define lockdep_assert_preemption_disabled() do { } while (0)           <<
636 # define lockdep_assert_in_softirq() do { } while (0)                    <<
637 # define lockdep_assert_in_softirq_func() do { } while (0)               <<
638 #endif                                         << 
639                                                << 
640 #ifdef CONFIG_PROVE_RAW_LOCK_NESTING           << 
641                                                << 
642 # define lockdep_assert_RT_in_threaded_ctx() do {               \       <<
643                 WARN_ONCE(debug_locks && !current->lockdep_recursion && \  <<
644                           lockdep_hardirq_context() &&          \       <<
645                           !(current->hardirq_threaded || current->irq_config), \  <<
646                           "Not in threaded context on PREEMPT_RT as expected\n");  \  <<
647 } while (0)                                    << 
648                                                << 
649 #else                                          << 
650                                                << 
651 # define lockdep_assert_RT_in_threaded_ctx() do { } while (0)            <<
652                                                << 
653 #endif                                            640 #endif
654                                                   641 
655 #ifdef CONFIG_LOCKDEP                             642 #ifdef CONFIG_LOCKDEP
656 void lockdep_rcu_suspicious(const char *file,     643 void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
657 #else                                             644 #else
658 static inline void                                645 static inline void
659 lockdep_rcu_suspicious(const char *file, const    646 lockdep_rcu_suspicious(const char *file, const int line, const char *s)
660 {                                                 647 {
661 }                                                 648 }
662 #endif                                            649 #endif
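lockdep_rcu_suspicious() is the reporting hook behind RCU's lockdep checks
(RCU_LOCKDEP_WARN() and the rcu_dereference_check() family in rcupdate.h). A
sketch of the kind of code that ends up here; struct foo and gp are
hypothetical:

    struct foo { int x; };
    static struct foo __rcu *gp;

    static void my_reader(void)
    {
            struct foo *p;

            p = rcu_dereference(gp);        /* no protection held: reported */
            (void)p;

            rcu_read_lock();
            p = rcu_dereference(gp);        /* inside a read-side section: fine */
            rcu_read_unlock();
    }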
663                                                   650 
664 #endif /* __LINUX_LOCKDEP_H */                    651 #endif /* __LINUX_LOCKDEP_H */
665                                                   652 
