/*
 * Atomic xchg and cmpxchg operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_CMPXCHG_H
#define _XTENSA_CMPXCHG_H

#ifndef __ASSEMBLY__

#include <linux/bits.h>
#include <linux/stringify.h>

/*
 * cmpxchg
 */

static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
#if XCHAL_HAVE_EXCLUSIVE
        unsigned long tmp, result;

        __asm__ __volatile__(
                        "1:     l32ex   %[result], %[addr]\n"
                        "       bne     %[result], %[cmp], 2f\n"
                        "       mov     %[tmp], %[new]\n"
                        "       s32ex   %[tmp], %[addr]\n"
                        "       getex   %[tmp]\n"
                        "       beqz    %[tmp], 1b\n"
                        "2:\n"
                        : [result] "=&a" (result), [tmp] "=&a" (tmp)
                        : [new] "a" (new), [addr] "a" (p), [cmp] "a" (old)
                        : "memory"
                        );

        return result;
#elif XCHAL_HAVE_S32C1I
        __asm__ __volatile__(
                        "       wsr     %[cmp], scompare1\n"
                        "       s32c1i  %[new], %[mem]\n"
                        : [new] "+a" (new), [mem] "+m" (*p)
                        : [cmp] "a" (old)
                        : "memory"
                        );

        return new;
#else
        __asm__ __volatile__(
                        "       rsil    a14, "__stringify(TOPLEVEL)"\n"
                        "       l32i    %[old], %[mem]\n"
                        "       bne     %[old], %[cmp], 1f\n"
                        "       s32i    %[new], %[mem]\n"
                        "1:\n"
                        "       wsr     a14, ps\n"
                        "       rsync\n"
                        : [old] "=&a" (old), [mem] "+m" (*p)
                        : [cmp] "a" (old), [new] "r" (new)
                        : "a14", "memory");
        return old;
#endif
}

/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        switch (size) {
        case 4:  return __cmpxchg_u32(ptr, old, new);
        default: __cmpxchg_called_with_bad_pointer();
                 return old;
        }
}

#define arch_cmpxchg(ptr, o, n)                                               \
        ({ __typeof__(*(ptr)) _o_ = (o);                                      \
           __typeof__(*(ptr)) _n_ = (n);                                      \
           (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,          \
                                          (unsigned long)_n_, sizeof(*(ptr))); \
        })

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
        default:
                return __generic_cmpxchg_local(ptr, old, new, size);
        }

        return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define arch_cmpxchg_local(ptr, o, n)                                          \
        ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o),\
                        (unsigned long)(n), sizeof(*(ptr))))
#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#define arch_cmpxchg64(ptr, o, n)       arch_cmpxchg64_local((ptr), (o), (n))
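
/*
 * Illustrative sketch, not part of the original header: callers normally
 * reach arch_cmpxchg() above through the generic cmpxchg() wrapper.  A
 * typical lock-free read-modify-write loop built on it looks like the
 * following, where 'counter' is a hypothetical 'int *' shared between
 * contexts:
 *
 *	int old, new;
 *
 *	do {
 *		old = READ_ONCE(*counter);
 *		new = old + 1;
 *	} while (cmpxchg(counter, old, new) != old);
 *
 * The loop retries whenever another CPU modified *counter between the load
 * and the compare-and-exchange, so the increment is never lost.
 */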

/*
 * xchg_u32
 *
 * Note that a14 is used here because the register allocation
 * done by the compiler is not guaranteed and a window overflow
 * may not occur between the rsil and wsr instructions. By using
 * a14 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */

static inline unsigned long xchg_u32(volatile int *m, unsigned long val)
{
#if XCHAL_HAVE_EXCLUSIVE
        unsigned long tmp, result;

        __asm__ __volatile__(
                        "1:     l32ex   %[result], %[addr]\n"
                        "       mov     %[tmp], %[val]\n"
                        "       s32ex   %[tmp], %[addr]\n"
                        "       getex   %[tmp]\n"
                        "       beqz    %[tmp], 1b\n"
                        : [result] "=&a" (result), [tmp] "=&a" (tmp)
                        : [val] "a" (val), [addr] "a" (m)
                        : "memory"
                        );

        return result;
#elif XCHAL_HAVE_S32C1I
        unsigned long tmp, result;

        __asm__ __volatile__(
                        "1:     l32i    %[tmp], %[mem]\n"
                        "       mov     %[result], %[val]\n"
                        "       wsr     %[tmp], scompare1\n"
                        "       s32c1i  %[result], %[mem]\n"
                        "       bne     %[result], %[tmp], 1b\n"
                        : [result] "=&a" (result), [tmp] "=&a" (tmp),
                          [mem] "+m" (*m)
                        : [val] "a" (val)
                        : "memory"
                        );
        return result;
#else
        unsigned long tmp;

        __asm__ __volatile__(
                        "       rsil    a14, "__stringify(TOPLEVEL)"\n"
                        "       l32i    %[tmp], %[mem]\n"
                        "       s32i    %[val], %[mem]\n"
                        "       wsr     a14, ps\n"
                        "       rsync\n"
                        : [tmp] "=&a" (tmp), [mem] "+m" (*m)
                        : [val] "a" (val)
                        : "a14", "memory");
        return tmp;
#endif
}

#define arch_xchg(ptr, x) \
        ((__typeof__(*(ptr)))__arch_xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
{
        int off = (unsigned long)ptr % sizeof(u32);
        volatile u32 *p = ptr - off;
#ifdef __BIG_ENDIAN
        int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
#else
        int bitoff = off * BITS_PER_BYTE;
#endif
        u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
        u32 oldv, newv;
        u32 ret;

        do {
                oldv = READ_ONCE(*p);
                ret = (oldv & bitmask) >> bitoff;
                newv = (oldv & ~bitmask) | (x << bitoff);
        } while (__cmpxchg_u32(p, oldv, newv) != oldv);

        return ret;
}

/*
 * This only works if the compiler isn't horribly bad at optimizing.
 * gcc-2.5.8 reportedly can't handle this, but I define that one to
 * be dead anyway.
 */

extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__arch_xchg(unsigned long x, volatile void *ptr, int size)
{
        switch (size) {
        case 1:
                return xchg_small(ptr, x, 1);
        case 2:
                return xchg_small(ptr, x, 2);
        case 4:
                return xchg_u32(ptr, x);
        default:
                __xchg_called_with_bad_pointer();
                return x;
        }
}

#endif /* __ASSEMBLY__ */

#endif /* _XTENSA_CMPXCHG_H */