/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define	ASM_DI	"d"
#else
#define	ASM_DI	"di"
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
}
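
/*
 * When CONFIG_RMW_INSNS is enabled (possible only on CPUs that have
 * the cas instruction, i.e. 68020 and later), the *_return and
 * fetch_* variants below are built around a casl retry loop.
 * Otherwise (68000-class and ColdFire parts, which lack cas) they
 * fall back to disabling interrupts around a plain C
 * read-modify-write, which is sufficient on UP m68k.
 */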
#ifdef CONFIG_RMW_INSNS

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "di" (i), "2" (arch_atomic_read(v)));		\
	return t;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "di" (i), "2" (arch_atomic_read(v)));		\
	return tmp;							\
}

#else

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = (v->counter c_op i);					\
	local_irq_restore(flags);					\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = v->counter;							\
	v->counter c_op i;						\
	local_irq_restore(flags);					\
									\
	return t;							\
}

#endif /* CONFIG_RMW_INSNS */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define arch_atomic_add_return	arch_atomic_add_return
#define arch_atomic_sub_return	arch_atomic_sub_return
#define arch_atomic_fetch_add	arch_atomic_fetch_add
#define arch_atomic_fetch_sub	arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, eor)

#define arch_atomic_fetch_and	arch_atomic_fetch_and
#define arch_atomic_fetch_or	arch_atomic_fetch_or
#define arch_atomic_fetch_xor	arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void arch_atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
#define arch_atomic_inc arch_atomic_inc

static inline void arch_atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
#define arch_atomic_dec arch_atomic_dec

static inline int arch_atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test

static inline int arch_atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}

static inline int arch_atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

#ifndef CONFIG_RMW_INSNS

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = arch_atomic_read(v);
	if (prev == old)
		arch_atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

static inline int arch_atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = arch_atomic_read(v);
	arch_atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}
#define arch_atomic_xchg arch_atomic_xchg

#endif /* !CONFIG_RMW_INSNS */

static inline int arch_atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

static inline int arch_atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}
#define arch_atomic_add_negative arch_atomic_add_negative

#endif /* __ARCH_M68K_ATOMIC__ */