/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Internal implementation of local_lock.  This header must only be pulled
 * in through linux/local_lock.h, which defines the public wrappers; the
 * guard below enforces that.
 */
#ifndef _LINUX_LOCAL_LOCK_H
# error "Do not include directly, include linux/local_lock.h"
#endif

#include <linux/percpu-defs.h>
#include <linux/lockdep.h>

#ifndef CONFIG_PREEMPT_RT

/*
 * On !PREEMPT_RT a local_lock carries no lock state of its own: mutual
 * exclusion comes from disabling preemption/interrupts in the lock macros
 * below.  The struct only holds lockdep and owner-tracking state when
 * CONFIG_DEBUG_LOCK_ALLOC is enabled, and is empty otherwise.
 */
typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
	struct task_struct *owner;
#endif
} local_lock_t;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Static initializer for the debug fields of a local_lock_t. */
# define LOCAL_LOCK_DEBUG_INIT(lockname)		\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_CONFIG,	\
		.lock_type = LD_LOCK_PERCPU,		\
	},						\
	.owner = NULL,

/* Debug acquire hook: inform lockdep and record the owning task. */
static inline void local_lock_acquire(local_lock_t *l)
{
	lock_map_acquire(&l->dep_map);
	/* Acquiring a lock that already has an owner is a bug. */
	DEBUG_LOCKS_WARN_ON(l->owner);
	l->owner = current;
}

/* Debug release hook: verify ownership, then inform lockdep. */
static inline void local_lock_release(local_lock_t *l)
{
	/* Only the task that took the lock may release it. */
	DEBUG_LOCKS_WARN_ON(l->owner != current);
	l->owner = NULL;
	lock_map_release(&l->dep_map);
}

static inline void local_lock_debug_init(local_lock_t *l)
{
	l->owner = NULL;
}
#else /* CONFIG_DEBUG_LOCK_ALLOC */
/* Without CONFIG_DEBUG_LOCK_ALLOC all debug hooks compile away. */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

#define INIT_LOCAL_LOCK(lockname)	{ LOCAL_LOCK_DEBUG_INIT(lockname) }

/*
 * Runtime initializer: register the lock with lockdep under a static,
 * per-callsite lock class key, as a per-CPU lock class (LD_LOCK_PERCPU).
 */
#define __local_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,	\
			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
			      LD_LOCK_PERCPU);			\
	local_lock_debug_init(lock);				\
} while (0)

/*
 * Same as __local_lock_init() except the lock is registered as a normal
 * lock class (LD_LOCK_NORMAL) rather than a per-CPU one.
 */
#define __spinlock_nested_bh_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,	\
			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
			      LD_LOCK_NORMAL);			\
	local_lock_debug_init(lock);				\
} while (0)

/* Lock: disable preemption, then run the debug acquire hook. */
#define __local_lock(lock)					\
	do {							\
		preempt_disable();				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

/* Lock: disable interrupts, then run the debug acquire hook. */
#define __local_lock_irq(lock)					\
	do {							\
		local_irq_disable();				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

/* Lock: save and disable interrupts, then run the debug acquire hook. */
#define __local_lock_irqsave(lock, flags)			\
	do {							\
		local_irq_save(flags);				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

/* Unlock counterparts: debug release hook first, then undo the disable. */
#define __local_unlock(lock)					\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		preempt_enable();				\
	} while (0)

#define __local_unlock_irq(lock)				\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		local_irq_enable();				\
	} while (0)

#define __local_unlock_irqrestore(lock, flags)			\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		local_irq_restore(flags);			\
	} while (0)

/*
 * Nested-BH variant: no preempt/irq toggling of its own; asserts the
 * caller is already in softirq context.
 */
#define __local_lock_nested_bh(lock)				\
	do {							\
		lockdep_assert_in_softirq();			\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

#define __local_unlock_nested_bh(lock)				\
	local_lock_release(this_cpu_ptr(lock))

#else /* !CONFIG_PREEMPT_RT */

/*
 * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
 * critical section while staying preemptible.
 */
typedef spinlock_t local_lock_t;

#define INIT_LOCAL_LOCK(lockname)	__LOCAL_SPIN_LOCK_UNLOCKED((lockname))

#define __local_lock_init(l)					\
	do {							\
		local_spin_lock_init((l));			\
	} while (0)

/*
 * Lock: disable migration (not preemption) so the task stays on this CPU,
 * then take the per-CPU spinlock.
 */
#define __local_lock(__lock)					\
	do {							\
		migrate_disable();				\
		spin_lock(this_cpu_ptr((__lock)));		\
	} while (0)

/* The irq variant maps to the plain lock; see __local_lock(). */
#define __local_lock_irq(lock)			__local_lock(lock)

/* flags is type-checked but unused on RT; it is forced to 0. */
#define __local_lock_irqsave(lock, flags)			\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = 0;					\
		__local_lock(lock);				\
	} while (0)

#define __local_unlock(__lock)					\
	do {							\
		spin_unlock(this_cpu_ptr((__lock)));		\
		migrate_enable();				\
	} while (0)

#define __local_unlock_irq(lock)		__local_unlock(lock)

/* flags is ignored on RT; this is a plain unlock. */
#define __local_unlock_irqrestore(lock, flags)	__local_unlock(lock)

/* Nested-BH variant: assert softirq context, no migrate_disable(). */
#define __local_lock_nested_bh(lock)				\
do {								\
	lockdep_assert_in_softirq_func();			\
	spin_lock(this_cpu_ptr(lock));				\
} while (0)

#define __local_unlock_nested_bh(lock)				\
do {								\
	spin_unlock(this_cpu_ptr((lock)));			\
} while (0)

#endif /* CONFIG_PREEMPT_RT */
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.