/* SPDX-License-Identifier: GPL-2.0 */

/*
 * 'Generic' ticket-lock implementation.
 *
 * It relies on atomic_fetch_add() having well defined forward progress
 * guarantees under contention. If your architecture cannot provide this, stick
 * to a test-and-set lock.
 *
 * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a
 * sub-word of the value. This is generally true for anything LL/SC although
 * you'd be hard pressed to find anything useful in architecture specifications
 * about this. If your architecture cannot do this you might be better off with
 * a test-and-set.
 *
 * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence
 * uses atomic_fetch_add() which is RCsc to create an RCsc lock, along with
 * a full fence after the spin to upgrade the otherwise-RCpc
 * atomic_cond_read_acquire().
 *
 * The implementation uses smp_cond_load_acquire() to spin, so if the
 * architecture has WFE like instructions to sample and wait for memory
 * modifications, be sure to implement that (see arm64 for an example).
 */

#ifndef __ASM_GENERIC_SPINLOCK_H
#define __ASM_GENERIC_SPINLOCK_H

#include <linux/atomic.h>
#include <asm-generic/spinlock_types.h>

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	u32 val = atomic_fetch_add(1<<16, lock);
	u16 ticket = val >> 16;

	if (ticket == (u16)val)
		return;

	/*
	 * atomic_cond_read_acquire() is RCpc, but rather than defining a
	 * custom cond_read_rcsc() here we just issue the full fence below.
	 * We only need the prior reads before subsequent writes ordering
	 * from smp_mb(), but as atomic_cond_read_acquire() just emits reads
	 * and we have no outstanding writes due to the atomic_fetch_add()
	 * the extra orderings are free.
	 */
	atomic_cond_read_acquire(lock, ticket == (u16)VAL);
	smp_mb();
}

static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock)
{
	u32 old = atomic_read(lock);

	if ((old >> 16) != (old & 0xffff))
		return false;

	return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */
}
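
#if 0
/*
 * Illustrative sketch only, kept under #if 0 so it does not affect this
 * header: a minimal userspace model of the next/owner encoding used by
 * arch_spin_lock()/arch_spin_trylock(), written with C11 <stdatomic.h>
 * and pthreads instead of the kernel atomic API. All demo_* names are
 * made up for illustration and the kernel's exact ordering requirements
 * (RCsc fetch_add plus the extra smp_mb()) are not modeled here.
 *
 * The upper 16 bits hand out tickets ("next"), the lower 16 bits hold
 * the ticket currently served ("owner"); the lock is free when both
 * halves are equal. One atomic_fetch_add(1 << 16) both takes a ticket
 * and samples the owner, which is the kernel's fast path.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

static _Atomic uint32_t demo_lock;	/* next:16 | owner:16, 0 = unlocked */
static unsigned long demo_counter;	/* protected by demo_lock */

static void demo_ticket_lock(_Atomic uint32_t *lock)
{
	/* One RMW hands out our ticket (high half) and samples the owner. */
	uint32_t val = atomic_fetch_add_explicit(lock, 1u << 16,
						 memory_order_acquire);
	uint16_t ticket = (uint16_t)(val >> 16);

	if (ticket == (uint16_t)val)
		return;		/* owner already equals our ticket */

	/* Wait until the owner half reaches our ticket. */
	while ((uint16_t)atomic_load_explicit(lock, memory_order_acquire) != ticket)
		;
}

static void demo_ticket_unlock(_Atomic uint32_t *lock)
{
	uint32_t old = atomic_load_explicit(lock, memory_order_relaxed);
	uint32_t newval;

	/*
	 * The kernel releases with a 16-bit store to the owner half;
	 * portable C11 has no sub-word store on a single uint32_t, so
	 * bump only the low 16 bits with a CAS loop instead.
	 */
	do {
		newval = (old & 0xffff0000u) | (uint16_t)(old + 1);
	} while (!atomic_compare_exchange_weak_explicit(lock, &old, newval,
							memory_order_release,
							memory_order_relaxed));
}

static void *demo_worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		demo_ticket_lock(&demo_lock);
		demo_counter++;
		demo_ticket_unlock(&demo_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t threads[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, demo_worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);

	/* Expect 4 * 100000 with the lock held around each increment. */
	printf("counter = %lu\n", demo_counter);
	return 0;
}
#endif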
"taken" : "freed", (LOCK)->on_cpu); \ >> 55 } while (0) >> 56 #else >> 57 static inline void spin_unlock(spinlock_t * lock) >> 58 { >> 59 mb(); >> 60 lock->lock = 0; >> 61 } >> 62 >> 63 static inline void spin_lock(spinlock_t * lock) >> 64 { >> 65 long tmp; >> 66 >> 67 /* Use sub-sections to put the actual loop at the end >> 68 of this object file's text section so as to perfect >> 69 branch prediction. */ >> 70 __asm__ __volatile__( >> 71 "1: ldl_l %0,%1\n" >> 72 " blbs %0,2f\n" >> 73 " or %0,1,%0\n" >> 74 " stl_c %0,%1\n" >> 75 " beq %0,2f\n" >> 76 " mb\n" >> 77 ".subsection 2\n" >> 78 "2: ldl %0,%1\n" >> 79 " blbs %0,2b\n" >> 80 " br 1b\n" >> 81 ".previous" >> 82 : "=&r" (tmp), "=m" (lock->lock) >> 83 : "m"(lock->lock) : "memory"); >> 84 } >> 85 >> 86 #define spin_trylock(lock) (!test_and_set_bit(0,(lock))) >> 87 #define spin_lock_own(LOCK, LOCATION) ((void)0) >> 88 #endif /* CONFIG_DEBUG_SPINLOCK */ >> 89 >> 90 /***********************************************************/ >> 91 >> 92 typedef struct { >> 93 volatile int write_lock:1, read_counter:31; >> 94 } /*__attribute__((aligned(32)))*/ rwlock_t; >> 95 >> 96 #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } >> 97 >> 98 #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) >> 99 >> 100 #if CONFIG_DEBUG_RWLOCK >> 101 extern void write_lock(rwlock_t * lock); >> 102 extern void read_lock(rwlock_t * lock); >> 103 #else >> 104 static inline void write_lock(rwlock_t * lock) >> 105 { >> 106 long regx; >> 107 >> 108 __asm__ __volatile__( >> 109 "1: ldl_l %1,%0\n" >> 110 " bne %1,6f\n" >> 111 " or $31,1,%1\n" >> 112 " stl_c %1,%0\n" >> 113 " beq %1,6f\n" >> 114 " mb\n" >> 115 ".subsection 2\n" >> 116 "6: ldl %1,%0\n" >> 117 " bne %1,6b\n" >> 118 " br 1b\n" >> 119 ".previous" >> 120 : "=m" (*(volatile int *)lock), "=&r" (regx) >> 121 : "" (*(volatile int *)lock) : "memory"); >> 122 } >> 123 >> 124 static inline void read_lock(rwlock_t * lock) >> 125 { >> 126 long regx; >> 127 >> 128 __asm__ __volatile__( >> 129 "1: ldl_l %1,%0\n" >> 130 " blbs %1,6f\n" >> 131 " subl %1,2,%1\n" >> 132 " stl_c %1,%0\n" >> 133 " beq %1,6f\n" >> 134 "4: mb\n" >> 135 ".subsection 2\n" >> 136 "6: ldl %1,%0\n" >> 137 " blbs %1,6b\n" >> 138 " br 1b\n" >> 139 ".previous" >> 140 : "=m" (*(volatile int *)lock), "=&r" (regx) >> 141 : "m" (*(volatile int *)lock) : "memory"); >> 142 } >> 143 #endif /* CONFIG_DEBUG_RWLOCK */ >> 144 >> 145 static inline void write_unlock(rwlock_t * lock) >> 146 { >> 147 mb(); >> 148 *(volatile int *)lock = 0; >> 149 } >> 150 >> 151 static inline void read_unlock(rwlock_t * lock) >> 152 { >> 153 long regx; >> 154 __asm__ __volatile__( >> 155 " mb\n" >> 156 "1: ldl_l %1,%0\n" >> 157 " addl %1,2,%1\n" >> 158 " stl_c %1,%0\n" >> 159 " beq %1,6f\n" >> 160 ".subsection 2\n" >> 161 "6: br 1b\n" >> 162 ".previous" >> 163 : "=m" (*(volatile int *)lock), "=&r" (regx) >> 164 : "m" (*(volatile int *)lock) : "memory"); 61 } 165 } 62 166 63 static __always_inline void arch_spin_unlock(a !! 

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	u32 val = lock.counter;

	return ((val >> 16) == (val & 0xffff));
}

static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	arch_spinlock_t val = READ_ONCE(*lock);

	return !arch_spin_value_unlocked(val);
}

static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	u32 val = atomic_read(lock);

	return (s16)((val >> 16) - (val & 0xffff)) > 1;
}

#include <asm/qrwlock.h>

#endif /* __ASM_GENERIC_SPINLOCK_H */
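
#if 0
/*
 * Illustrative sketch only, kept under #if 0 and appended after the
 * header body purely as an illustration: the arithmetic behind
 * arch_spin_value_unlocked() and arch_spin_is_contended(). The lock is
 * free when the next and owner halves match, held when they differ by
 * one, and contended when they differ by more than one; the signed
 * 16-bit cast keeps the comparison correct when the halves wrap. The
 * demo_report() helper is made up for this sketch.
 */
#include <stdint.h>
#include <stdio.h>

static void demo_report(uint32_t val)
{
	uint16_t next = val >> 16, owner = val & 0xffff;
	int unlocked = (next == owner);
	int contended = (int16_t)(next - owner) > 1;

	printf("next=%#06x owner=%#06x unlocked=%d contended=%d\n",
	       (unsigned)next, (unsigned)owner, unlocked, contended);
}

int main(void)
{
	demo_report(0x00030003);	/* free */
	demo_report(0x00040003);	/* held, no waiters */
	demo_report(0x00050003);	/* held, at least one waiter */
	demo_report(0x0001ffff);	/* held and contended, halves wrapped */
	return 0;
}
#endif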