TOMOYO Linux Cross Reference
Linux/arch/alpha/include/asm/atomic.h


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

/*
 * To ensure dependency ordering is preserved for the _relaxed and
 * _release atomics, an smp_mb() is unconditionally inserted into the
 * _relaxed variants, which are used to build the barriered versions.
 * Avoid redundant back-to-back fences in the _acquire and _fence
 * versions.
 */
#define __atomic_acquire_fence()
#define __atomic_post_full_fence()

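/*
 * Illustrative sketch (not part of the original header): with the two
 * fences above defined away, the generic fallback layer composes the
 * ordered forms from the _relaxed ones in roughly the following shape,
 * so the smp_mb() already inside each _relaxed variant is what makes
 * the _acquire/_release/full versions ordered on Alpha:
 *
 *	static __always_inline int
 *	arch_atomic_add_return_acquire(int i, atomic_t *v)
 *	{
 *		int ret = arch_atomic_add_return_relaxed(i, v);
 *		__atomic_acquire_fence();
 *		return ret;
 *	}
 */
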
#define ATOMIC64_INIT(i)        { (i) }

#define arch_atomic_read(v)     READ_ONCE((v)->counter)
#define arch_atomic64_read(v)   READ_ONCE((v)->counter)

#define arch_atomic_set(v,i)    WRITE_ONCE((v)->counter, (i))
#define arch_atomic64_set(v,i)  WRITE_ONCE((v)->counter, (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

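/*
 * Editorial sketch of what each LL/SC loop below does, in C-like
 * pseudocode (illustrative only; store_conditional() is a made-up
 * helper standing in for stl_c/stq_c):
 *
 *	do {
 *		temp = v->counter;              // ldl_l: load-locked
 *		temp = temp OP i;               // addl, subl, and, bic, bis, xor
 *	} while (!store_conditional(&v->counter, temp));
 *
 * stl_c writes its success flag back into the source register, so a
 * zero in %0 means the reservation was lost; "beq %0,2f" then jumps to
 * the out-of-line "br 1b" that restarts the sequence.
 */
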
#define ATOMIC_OP(op, asm_op)                                           \
static __inline__ void arch_atomic_##op(int i, atomic_t * v)            \
{                                                                       \
        unsigned long temp;                                             \
        __asm__ __volatile__(                                           \
        "1:     ldl_l %0,%1\n"                                          \
        "       " #asm_op " %0,%2,%0\n"                                 \
        "       stl_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter)                                \
        :"Ir" (i), "m" (v->counter));                                   \
}                                                                       \

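/*
 * The *_return variants below return the new value.  asm_op is issued
 * twice: once into "result" (which is returned) and once into "temp"
 * (which stl_c then clobbers with its success flag).  The trailing
 * smp_mb() provides the ordering promised in the comment at the top of
 * this file.
 */
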
#define ATOMIC_OP_RETURN(op, asm_op)                                    \
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{                                                                       \
        long temp, result;                                              \
        __asm__ __volatile__(                                           \
        "1:     ldl_l %0,%1\n"                                          \
        "       " #asm_op " %0,%3,%2\n"                                 \
        "       " #asm_op " %0,%3,%0\n"                                 \
        "       stl_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
        smp_mb();                                                       \
        return result;                                                  \
}

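/*
 * The fetch_* variants instead return the old value: ldl_l loads it
 * directly into "result" (%2), and only "temp" (%0) carries the
 * updated value into the conditional store.
 */
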
#define ATOMIC_FETCH_OP(op, asm_op)                                     \
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)  \
{                                                                       \
        long temp, result;                                              \
        __asm__ __volatile__(                                           \
        "1:     ldl_l %2,%1\n"                                          \
        "       " #asm_op " %2,%3,%0\n"                                 \
        "       stl_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
        smp_mb();                                                       \
        return result;                                                  \
}

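/*
 * The 64-bit variants below are structurally identical to the 32-bit
 * ones above; they simply use the quadword LL/SC pair (ldq_l/stq_c)
 * instead of the longword pair (ldl_l/stl_c).
 */
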
#define ATOMIC64_OP(op, asm_op)                                         \
static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v)        \
{                                                                       \
        s64 temp;                                                       \
        __asm__ __volatile__(                                           \
        "1:     ldq_l %0,%1\n"                                          \
        "       " #asm_op " %0,%2,%0\n"                                 \
        "       stq_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter)                                \
        :"Ir" (i), "m" (v->counter));                                   \
}                                                                       \

#define ATOMIC64_OP_RETURN(op, asm_op)                                  \
static __inline__ s64                                                   \
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)              \
{                                                                       \
        s64 temp, result;                                               \
        __asm__ __volatile__(                                           \
        "1:     ldq_l %0,%1\n"                                          \
        "       " #asm_op " %0,%3,%2\n"                                 \
        "       " #asm_op " %0,%3,%0\n"                                 \
        "       stq_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
        smp_mb();                                                       \
        return result;                                                  \
}

#define ATOMIC64_FETCH_OP(op, asm_op)                                   \
static __inline__ s64                                                   \
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)               \
{                                                                       \
        s64 temp, result;                                               \
        __asm__ __volatile__(                                           \
        "1:     ldq_l %2,%1\n"                                          \
        "       " #asm_op " %2,%3,%0\n"                                 \
        "       stq_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
        smp_mb();                                                       \
        return result;                                                  \
}

#define ATOMIC_OPS(op)                                                  \
        ATOMIC_OP(op, op##l)                                            \
        ATOMIC_OP_RETURN(op, op##l)                                     \
        ATOMIC_FETCH_OP(op, op##l)                                      \
        ATOMIC64_OP(op, op##q)                                          \
        ATOMIC64_OP_RETURN(op, op##q)                                   \
        ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

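/*
 * For reference (an expansion summary, not additional code): each
 * ATOMIC_OPS(op) invocation above emits six functions, e.g. for "add":
 *
 *	void arch_atomic_add(int i, atomic_t *v);
 *	int  arch_atomic_add_return_relaxed(int i, atomic_t *v);
 *	int  arch_atomic_fetch_add_relaxed(int i, atomic_t *v);
 *	void arch_atomic64_add(s64 i, atomic64_t *v);
 *	s64  arch_atomic64_add_return_relaxed(s64 i, atomic64_t *v);
 *	s64  arch_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v);
 */
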
#define arch_atomic_add_return_relaxed          arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed          arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed           arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed           arch_atomic_fetch_sub_relaxed

#define arch_atomic64_add_return_relaxed        arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed        arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed         arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed         arch_atomic64_fetch_sub_relaxed

#define arch_atomic_andnot                      arch_atomic_andnot
#define arch_atomic64_andnot                    arch_atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm)                                             \
        ATOMIC_OP(op, asm)                                              \
        ATOMIC_FETCH_OP(op, asm)                                        \
        ATOMIC64_OP(op, asm)                                            \
        ATOMIC64_FETCH_OP(op, asm)

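/*
 * Alpha mnemonics for the bitwise forms: "bic" is AND-with-complement
 * (hence it implements andnot) and "bis" ("bit set") is OR.
 */
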
ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)

#define arch_atomic_fetch_and_relaxed           arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed        arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed            arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed           arch_atomic_fetch_xor_relaxed

#define arch_atomic64_fetch_and_relaxed         arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed      arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed          arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed         arch_atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int c, new, old;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldl_l   %[old],%[mem]\n"
        "       cmpeq   %[old],%[u],%[c]\n"
        "       addl    %[old],%[a],%[new]\n"
        "       bne     %[c],2f\n"
        "       stl_c   %[new],%[mem]\n"
        "       beq     %[new],3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
        : [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
        : "memory");
        smp_mb();
        return old;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

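/*
 * Usage sketch (illustrative only; "obj" and its "refs" field are
 * hypothetical): add "a" unless the counter currently equals "u", and
 * return the old value either way.  The classic use is taking a
 * reference only if the count has not already dropped to zero:
 *
 *	if (arch_atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;    // already released, do not touch
 */
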
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
        s64 c, new, old;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l   %[old],%[mem]\n"
        "       cmpeq   %[old],%[u],%[c]\n"
        "       addq    %[old],%[a],%[new]\n"
        "       bne     %[c],2f\n"
        "       stq_c   %[new],%[mem]\n"
        "       beq     %[new],3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
        : [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
        : "memory");
        smp_mb();
        return old;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
        s64 old, tmp;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l   %[old],%[mem]\n"
        "       subq    %[old],1,%[tmp]\n"
        "       ble     %[old],2f\n"
        "       stq_c   %[tmp],%[mem]\n"
        "       beq     %[tmp],3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : [old] "=&r"(old), [tmp] "=&r"(tmp)
        : [mem] "m"(*v)
        : "memory");
        smp_mb();
        return old - 1;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
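
/*
 * Return convention: the value after the decrement (old - 1) is
 * returned, but the store only happens when the old value was strictly
 * positive ("ble %[old],2f" skips it otherwise), so a negative result
 * means the counter was left untouched.  Sketch of typical use
 * ("sem_count" is a hypothetical counter):
 *
 *	if (arch_atomic64_dec_if_positive(&sem_count) < 0)
 *		wait_for_resource();    // hypothetical slow path
 */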

#endif /* _ALPHA_ATOMIC_H */
