TOMOYO Linux Cross Reference
Linux/arch/s390/include/asm/atomic_ops.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Low level functions for atomic operations
 *
 * Copyright IBM Corp. 1999, 2016
 */

#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__

#include <linux/limits.h>

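/*
 * Editorial note: __atomic_read() wraps a plain 32-bit load ("l") in
 * inline asm so the compiler emits exactly one memory access and
 * cannot cache or tear the read -- similar in spirit to READ_ONCE()
 * on the counter field.
 */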
static __always_inline int __atomic_read(const atomic_t *v)
{
        int c;

        asm volatile(
                "       l       %[c],%[counter]\n"
                : [c] "=d" (c) : [counter] "R" (v->counter));
        return c;
}

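/*
 * Editorial note: the mvhi/mvghi path below stores a 16-bit signed
 * immediate directly to memory, so it is only usable when the compiler
 * can prove the value is a constant in [S16_MIN, S16_MAX] (the "K"
 * constraint); anything else goes through a register store. For
 * example, __atomic_set(v, 1) compiles to a single mvhi, while
 * __atomic_set(v, x) for a runtime x compiles to an st.
 */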
static __always_inline void __atomic_set(atomic_t *v, int i)
{
        if (__builtin_constant_p(i) && i >= S16_MIN && i <= S16_MAX) {
                asm volatile(
                        "       mvhi    %[counter], %[i]\n"
                        : [counter] "=Q" (v->counter) : [i] "K" (i));
        } else {
                asm volatile(
                        "       st      %[i],%[counter]\n"
                        : [counter] "=R" (v->counter) : [i] "d" (i));
        }
}

static __always_inline s64 __atomic64_read(const atomic64_t *v)
{
        s64 c;

        asm volatile(
                "       lg      %[c],%[counter]\n"
                : [c] "=d" (c) : [counter] "RT" (v->counter));
        return c;
}

static __always_inline void __atomic64_set(atomic64_t *v, s64 i)
{
        if (__builtin_constant_p(i) && i >= S16_MIN && i <= S16_MAX) {
                asm volatile(
                        "       mvghi   %[counter], %[i]\n"
                        : [counter] "=Q" (v->counter) : [i] "K" (i));
        } else {
                asm volatile(
                        "       stg     %[i],%[counter]\n"
                        : [counter] "=RT" (v->counter) : [i] "d" (i));
        }
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

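/*
 * Editorial note: z196 introduced the interlocked-access facility,
 * whose "load and ..." instructions (laa, lan, lao, lax and their
 * 64-bit g-forms) atomically apply an operation to storage and return
 * the old value in a single instruction, so no compare-and-swap retry
 * loop is needed on these machines.
 */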
#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier)            \
static __always_inline op_type op_name(op_type val, op_type *ptr)       \
{                                                                       \
        op_type old;                                                    \
                                                                        \
        asm volatile(                                                   \
                op_string "     %[old],%[val],%[ptr]\n"                 \
                op_barrier                                              \
                : [old] "=d" (old), [ptr] "+QS" (*ptr)                  \
                : [val] "d" (val) : "cc", "memory");                    \
        return old;                                                     \
}

#define __ATOMIC_OPS(op_name, op_type, op_string)                       \
        __ATOMIC_OP(op_name, op_type, op_string, "\n")                  \
        __ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")

__ATOMIC_OPS(__atomic_add, int, "laa")
__ATOMIC_OPS(__atomic_and, int, "lan")
__ATOMIC_OPS(__atomic_or,  int, "lao")
__ATOMIC_OPS(__atomic_xor, int, "lax")

__ATOMIC_OPS(__atomic64_add, long, "laag")
__ATOMIC_OPS(__atomic64_and, long, "lang")
__ATOMIC_OPS(__atomic64_or,  long, "laog")
__ATOMIC_OPS(__atomic64_xor, long, "laxg")
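
/*
 * Editorial note: as an illustration, __ATOMIC_OPS(__atomic_add, int, "laa")
 * expands (condensed) to:
 *
 *      static __always_inline int __atomic_add(int val, int *ptr)
 *      {
 *              int old;
 *
 *              asm volatile(
 *                      "laa    %[old],%[val],%[ptr]\n"
 *                      : [old] "=d" (old), [ptr] "+QS" (*ptr)
 *                      : [val] "d" (val) : "cc", "memory");
 *              return old;
 *      }
 *
 * plus __atomic_add_barrier(), which is identical except that a
 * "bcr 14,0" (fast serialization) follows the laa.
 */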

#undef __ATOMIC_OPS
#undef __ATOMIC_OP

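/*
 * Editorial note: the "const" variants below use asi/agsi ("add
 * signed immediate to storage"), which add a small signed constant
 * directly to a memory word without returning the old value; hence the
 * void return type and the "i" constraint, which requires a
 * compile-time constant. The _barrier variants again append
 * "bcr 14,0" for serialization.
 */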
#define __ATOMIC_CONST_OP(op_name, op_type, op_string, op_barrier)      \
static __always_inline void op_name(op_type val, op_type *ptr)          \
{                                                                       \
        asm volatile(                                                   \
                op_string "     %[ptr],%[val]\n"                        \
                op_barrier                                              \
                : [ptr] "+QS" (*ptr) : [val] "i" (val) : "cc", "memory");\
}

#define __ATOMIC_CONST_OPS(op_name, op_type, op_string)                 \
        __ATOMIC_CONST_OP(op_name, op_type, op_string, "\n")            \
        __ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")

__ATOMIC_CONST_OPS(__atomic_add_const, int, "asi")
__ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")

#undef __ATOMIC_CONST_OPS
#undef __ATOMIC_CONST_OP

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

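/*
 * Editorial note: pre-z196 machines lack the interlocked-access
 * instructions, so each operation falls back to a classic
 * compare-and-swap loop: load the old value, compute old <op> val into
 * a scratch register, then cs/csg the result back; if another CPU
 * changed the word in the meantime (condition code 1, "jl 0b"), retry
 * with the freshly returned value. The "0" (*ptr) matching constraint
 * ties the initial load of [old] to the memory operand. Since cs/csg
 * itself acts as a serialization point, the _barrier variants expand
 * to the same code here.
 */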
#define __ATOMIC_OP(op_name, op_string)                                 \
static __always_inline int op_name(int val, int *ptr)                   \
{                                                                       \
        int old, new;                                                   \
                                                                        \
        asm volatile(                                                   \
                "0:     lr      %[new],%[old]\n"                        \
                op_string "     %[new],%[val]\n"                        \
                "       cs      %[old],%[new],%[ptr]\n"                 \
                "       jl      0b"                                     \
                : [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
                : [val] "d" (val), "0" (*ptr) : "cc", "memory");       \
        return old;                                                     \
}

#define __ATOMIC_OPS(op_name, op_string)                                \
        __ATOMIC_OP(op_name, op_string)                                 \
        __ATOMIC_OP(op_name##_barrier, op_string)

__ATOMIC_OPS(__atomic_add, "ar")
__ATOMIC_OPS(__atomic_and, "nr")
__ATOMIC_OPS(__atomic_or,  "or")
__ATOMIC_OPS(__atomic_xor, "xr")

#undef __ATOMIC_OPS

#define __ATOMIC64_OP(op_name, op_string)                               \
static __always_inline long op_name(long val, long *ptr)                \
{                                                                       \
        long old, new;                                                  \
                                                                        \
        asm volatile(                                                   \
                "0:     lgr     %[new],%[old]\n"                        \
                op_string "     %[new],%[val]\n"                        \
                "       csg     %[old],%[new],%[ptr]\n"                 \
                "       jl      0b"                                     \
                : [old] "=d" (old), [new] "=&d" (new), [ptr] "+QS" (*ptr)\
                : [val] "d" (val), "0" (*ptr) : "cc", "memory");       \
        return old;                                                     \
}

#define __ATOMIC64_OPS(op_name, op_string)                              \
        __ATOMIC64_OP(op_name, op_string)                               \
        __ATOMIC64_OP(op_name##_barrier, op_string)

__ATOMIC64_OPS(__atomic64_add, "agr")
__ATOMIC64_OPS(__atomic64_and, "ngr")
__ATOMIC64_OPS(__atomic64_or,  "ogr")
__ATOMIC64_OPS(__atomic64_xor, "xgr")

#undef __ATOMIC64_OPS

#define __atomic_add_const(val, ptr)            __atomic_add(val, ptr)
#define __atomic_add_const_barrier(val, ptr)    __atomic_add(val, ptr)
#define __atomic64_add_const(val, ptr)          __atomic64_add(val, ptr)
#define __atomic64_add_const_barrier(val, ptr)  __atomic64_add(val, ptr)

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

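/*
 * Editorial note: cs/csg implement compare-and-swap: the first operand
 * ([old]) is compared with the word at [ptr]; if they match, [new] is
 * stored and the condition code is 0, otherwise the current memory
 * contents are loaded into [old] and the condition code is 1. Either
 * way, [old] ends up holding the value observed in memory, which is
 * what these helpers return.
 */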
static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
        asm volatile(
                "       cs      %[old],%[new],%[ptr]"
                : [old] "+d" (old), [ptr] "+Q" (*ptr)
                : [new] "d" (new)
                : "cc", "memory");
        return old;
}

static __always_inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
        asm volatile(
                "       csg     %[old],%[new],%[ptr]"
                : [old] "+d" (old), [ptr] "+QS" (*ptr)
                : [new] "d" (new)
                : "cc", "memory");
        return old;
}
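
/*
 * Editorial note: a minimal usage sketch (illustrative only, not part
 * of this header). Callers can build arbitrary read-modify-write
 * primitives on top of __atomic_cmpxchg(); a hypothetical atomic
 * maximum could look like:
 *
 *      static inline void atomic_max(int *ptr, int val)
 *      {
 *              int old = *ptr;
 *
 *              while (old < val) {
 *                      int prev = __atomic_cmpxchg(ptr, old, val);
 *
 *                      if (prev == old)
 *                              break;  // swap succeeded
 *                      old = prev;     // lost the race, retry with new value
 *              }
 *      }
 */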

/* GCC versions before 14.2.0 may die with an ICE in some configurations. */
#if defined(__GCC_ASM_FLAG_OUTPUTS__) && !(IS_ENABLED(CONFIG_CC_IS_GCC) && (GCC_VERSION < 140200))

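/*
 * Editorial note: with flag-output support ("=@cc"), the compiler
 * receives the condition code produced by cs/csg directly in an
 * integer operand, so no extra instructions are needed to extract it;
 * cc == 0 means the compare matched and the swap was performed. The
 * fallback variants further below instead infer success by comparing
 * the returned value with the expected one.
 */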
static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
{
        int cc;

        asm volatile(
                "       cs      %[old],%[new],%[ptr]"
                : [old] "+d" (old), [ptr] "+Q" (*ptr), "=@cc" (cc)
                : [new] "d" (new)
                : "memory");
        return cc == 0;
}

static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
{
        int cc;

        asm volatile(
                "       csg     %[old],%[new],%[ptr]"
                : [old] "+d" (old), [ptr] "+QS" (*ptr), "=@cc" (cc)
                : [new] "d" (new)
                : "memory");
        return cc == 0;
}

#else /* __GCC_ASM_FLAG_OUTPUTS__ */

static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
{
        int old_expected = old;

        asm volatile(
                "       cs      %[old],%[new],%[ptr]"
                : [old] "+d" (old), [ptr] "+Q" (*ptr)
                : [new] "d" (new)
                : "cc", "memory");
        return old == old_expected;
}

static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
{
        long old_expected = old;

        asm volatile(
                "       csg     %[old],%[new],%[ptr]"
                : [old] "+d" (old), [ptr] "+QS" (*ptr)
                : [new] "d" (new)
                : "cc", "memory");
        return old == old_expected;
}

#endif /* __GCC_ASM_FLAG_OUTPUTS__ */

#endif /* __ARCH_S390_ATOMIC_OPS__ */
