/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_H
#define _ASM_POWERPC_SIMPLE_SPINLOCK_H

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * (the type definitions are in asm/simple_spinlock_types.h)
 */
#include <linux/irqflags.h>
#include <linux/kcsan-checks.h>
#include <asm/paravirt.h>
#include <asm/paca.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;
	unsigned int eh = IS_ENABLED(CONFIG_PPC64);

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2,%[eh]\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock), [eh] "n" (eh)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
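
/*
 * Illustrative sketch (not part of the original header): how an SPLPAR
 * yield helper can use the encoding above.  The holder's CPU number is
 * recovered from the low bits of the lock word and, if that virtual
 * processor is currently preempted, the hypervisor is asked to run it
 * instead.  The real splpar_spin_yield() lives in arch/powerpc/lib/locks.c;
 * the shape below only assumes the yield_count_of() and
 * yield_to_preempted() helpers from asm/paravirt.h.
 *
 *	static void example_spin_yield(arch_spinlock_t *lock)
 *	{
 *		unsigned int lock_value, holder_cpu, yield_count;
 *
 *		lock_value = lock->slock;
 *		if (lock_value == 0)
 *			return;				// already released
 *		holder_cpu = lock_value & 0xffff;	// 0x8000.. | cpu id
 *		yield_count = yield_count_of(holder_cpu);
 *		if ((yield_count & 1) == 0)
 *			return;				// holder is running
 *		rmb();
 *		if (lock->slock != lock_value)
 *			return;				// owner changed, retry
 *		yield_to_preempted(holder_cpu, yield_count);
 *	}
 */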

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
#endif

static inline void spin_yield(arch_spinlock_t *lock)
{
	if (is_shared_processor())
		splpar_spin_yield(lock);
	else
		barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
	if (is_shared_processor())
		splpar_rw_yield(lock);
	else
		barrier();
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	kcsan_mb();
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;
	unsigned int eh = IS_ENABLED(CONFIG_PPC64);

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1,%[eh]\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock), [eh] "n" (eh)
	: "cr0", "xer", "memory");

	return tmp;
}
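
/*
 * Worked example of the rwlock word values (illustrative comment, not
 * part of the original source):
 *
 *	rw->lock == 0	unlocked; a reader stores 1 and
 *			__arch_read_trylock() returns 1 (> 0, lock taken)
 *	rw->lock == 2	held by two readers; a third stores 3, returns 3
 *	rw->lock < 0	write-held (WRLOCK_TOKEN: the negative 0x800000yy
 *			token on 64-bit, -1 on 32-bit); addic. then yields
 *			a result <= 0, so the reader takes the ble- branch
 *			and the trylock fails
 */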

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;
	unsigned int eh = IS_ENABLED(CONFIG_PPC64);

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2,%[eh]\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock), [eh] "n" (eh)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_spin_relax(lock)	spin_yield(lock)
#define arch_read_relax(lock)	rw_yield(lock)
#define arch_write_relax(lock)	rw_yield(lock)

#endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */
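
/*
 * Usage sketch (illustrative, not part of the original file): kernel code
 * does not call the arch_* primitives above directly.  It uses the generic
 * spinlock API, which reaches them through the raw_spinlock layer when this
 * simple spinlock implementation is selected:
 *
 *	static DEFINE_SPINLOCK(example_lock);	// hypothetical lock
 *
 *	spin_lock(&example_lock);		// eventually arch_spin_lock()
 *	// ... critical section ...
 *	spin_unlock(&example_lock);		// eventually arch_spin_unlock()
 */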