
TOMOYO Linux Cross Reference
Linux/arch/parisc/include/asm/atomic.h


/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>          /* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {     \
        arch_spinlock_t *s = ATOMIC_HASH(l);    \
        local_irq_save(f);                      \
        arch_spin_lock(s);                      \
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {        \
        arch_spinlock_t *s = ATOMIC_HASH(l);            \
        arch_spin_unlock(s);                            \
        local_irq_restore(f);                           \
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
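
/*
 * A minimal sketch of how the hashing above behaves on SMP, assuming the
 * usual kernel helpers __aligned() and BUG_ON() are available in the
 * including translation unit.  Two counters packed into one cacheline
 * hash to the same arch_spinlock_t, so their updates serialize; the
 * second function shows the lock/update/unlock pattern that the
 * ATOMIC_OP() macros below generate.
 */
#if 0   /* illustrative sketch only */
static atomic_t example_counters[2] __aligned(L1_CACHE_BYTES);

static void example_same_lock(void)
{
        /* Same cacheline => ATOMIC_HASH() picks the same spinlock. */
        BUG_ON(ATOMIC_HASH(&example_counters[0]) !=
               ATOMIC_HASH(&example_counters[1]));
}

static void example_locked_add(atomic_t *v, int i)
{
        unsigned long flags;

        /* Take the per-cacheline lock, do a plain C update, release. */
        _atomic_spin_lock_irqsave(v, flags);
        v->counter += i;
        _atomic_spin_unlock_irqrestore(v, flags);
}
#endif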

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
        unsigned long flags;
        _atomic_spin_lock_irqsave(v, flags);

        v->counter = i;

        _atomic_spin_unlock_irqrestore(v, flags);
}

#define arch_atomic_set_release(v, i)   arch_atomic_set((v), (i))

static __inline__ int arch_atomic_read(const atomic_t *v)
{
        return READ_ONCE((v)->counter);
}
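
/*
 * A minimal sketch of the guarantee described above, assuming
 * ATOMIC_INIT() from <linux/types.h>: one CPU stores under the hash
 * lock while another reads with no lock at all, yet the reader only
 * ever observes the value before or after the aligned word store,
 * never a torn mix of the two.
 */
#if 0   /* illustrative sketch only */
static atomic_t example_counter = ATOMIC_INIT(0);

static void example_writer(void)                /* e.g. on CPU 0 */
{
        arch_atomic_set(&example_counter, 42);  /* store under the hash lock */
}

static int example_reader(void)                 /* e.g. on CPU 1, concurrently */
{
        return arch_atomic_read(&example_counter);      /* returns 0 or 42 */
}
#endif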

#define ATOMIC_OP(op, c_op)                                             \
static __inline__ void arch_atomic_##op(int i, atomic_t *v)             \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        _atomic_spin_lock_irqsave(v, flags);                            \
        v->counter c_op i;                                              \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
}

#define ATOMIC_OP_RETURN(op, c_op)                                      \
static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v)     \
{                                                                       \
        unsigned long flags;                                            \
        int ret;                                                        \
                                                                        \
        _atomic_spin_lock_irqsave(v, flags);                            \
        ret = (v->counter c_op i);                                      \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
                                                                        \
        return ret;                                                     \
}

#define ATOMIC_FETCH_OP(op, c_op)                                       \
static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v)        \
{                                                                       \
        unsigned long flags;                                            \
        int ret;                                                        \
                                                                        \
        _atomic_spin_lock_irqsave(v, flags);                            \
        ret = v->counter;                                               \
        v->counter c_op i;                                              \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
                                                                        \
        return ret;                                                     \
}

#define ATOMIC_OPS(op, c_op)                                            \
        ATOMIC_OP(op, c_op)                                             \
        ATOMIC_OP_RETURN(op, c_op)                                      \
        ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
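
/*
 * For reference, ATOMIC_OPS(add, +=) above expands to three functions
 * equivalent to the following (the sub, and, or and xor variants follow
 * the same pattern):
 *
 *      static __inline__ void arch_atomic_add(int i, atomic_t *v)
 *      {
 *              unsigned long flags;
 *
 *              _atomic_spin_lock_irqsave(v, flags);
 *              v->counter += i;
 *              _atomic_spin_unlock_irqrestore(v, flags);
 *      }
 *
 *      static __inline__ int arch_atomic_add_return(int i, atomic_t *v)
 *      {
 *              ...same locking; returns the new value (v->counter += i)...
 *      }
 *
 *      static __inline__ int arch_atomic_fetch_add(int i, atomic_t *v)
 *      {
 *              ...same locking; returns the value v->counter held before
 *              the addition...
 *      }
 */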

#define arch_atomic_add_return  arch_atomic_add_return
#define arch_atomic_sub_return  arch_atomic_sub_return
#define arch_atomic_fetch_add   arch_atomic_fetch_add
#define arch_atomic_fetch_sub   arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)                                            \
        ATOMIC_OP(op, c_op)                                             \
        ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#define arch_atomic_fetch_and   arch_atomic_fetch_and
#define arch_atomic_fetch_or    arch_atomic_fetch_or
#define arch_atomic_fetch_xor   arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)                                           \
static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v)         \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        _atomic_spin_lock_irqsave(v, flags);                            \
        v->counter c_op i;                                              \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
}

#define ATOMIC64_OP_RETURN(op, c_op)                                    \
static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v) \
{                                                                       \
        unsigned long flags;                                            \
        s64 ret;                                                        \
                                                                        \
        _atomic_spin_lock_irqsave(v, flags);                            \
        ret = (v->counter c_op i);                                      \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
                                                                        \
        return ret;                                                     \
}

#define ATOMIC64_FETCH_OP(op, c_op)                                     \
static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v)    \
{                                                                       \
        unsigned long flags;                                            \
        s64 ret;                                                        \
                                                                        \
        _atomic_spin_lock_irqsave(v, flags);                            \
        ret = v->counter;                                               \
        v->counter c_op i;                                              \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
                                                                        \
        return ret;                                                     \
}

#define ATOMIC64_OPS(op, c_op)                                          \
        ATOMIC64_OP(op, c_op)                                           \
        ATOMIC64_OP_RETURN(op, c_op)                                    \
        ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#define arch_atomic64_add_return        arch_atomic64_add_return
#define arch_atomic64_sub_return        arch_atomic64_sub_return
#define arch_atomic64_fetch_add         arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub         arch_atomic64_fetch_sub

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)                                          \
        ATOMIC64_OP(op, c_op)                                           \
        ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#define arch_atomic64_fetch_and         arch_atomic64_fetch_and
#define arch_atomic64_fetch_or          arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor         arch_atomic64_fetch_xor

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
arch_atomic64_set(atomic64_t *v, s64 i)
{
        unsigned long flags;
        _atomic_spin_lock_irqsave(v, flags);

        v->counter = i;

        _atomic_spin_unlock_irqrestore(v, flags);
}

#define arch_atomic64_set_release(v, i) arch_atomic64_set((v), (i))

static __inline__ s64
arch_atomic64_read(const atomic64_t *v)
{
        return READ_ONCE((v)->counter);
}

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */
