TOMOYO Linux Cross Reference
Linux/arch/s390/include/asm/atomic.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *            Denis Joseph Barrow,
 *            Arnd Bergmann,
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

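/*
 * This header provides the s390 arch_atomic*() operations, built on the
 * __atomic*() primitives from <asm/atomic_ops.h>.  The generic layer in
 * <linux/atomic.h> wraps them into the atomic_*() / atomic64_*() API and
 * generates fallbacks for anything not defined here.  Depending on kernel
 * version and configuration, the underlying primitives are expected to use
 * the interlocked-access facility instructions on z196 and newer machines,
 * falling back to compare-and-swap loops on older ones.
 */
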
static __always_inline int arch_atomic_read(const atomic_t *v)
{
        return __atomic_read(v);
}
#define arch_atomic_read arch_atomic_read

static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
        __atomic_set(v, i);
}
#define arch_atomic_set arch_atomic_set
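
/*
 * Each operation is followed by a #define of its own name: the generic
 * fallback machinery behind <linux/atomic.h> tests these names with the
 * preprocessor to see which operations the architecture supplies, and
 * only generates fallbacks for the missing ones.
 */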

static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
        return __atomic_add_barrier(i, &v->counter) + i;
}
#define arch_atomic_add_return arch_atomic_add_return

static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
        return __atomic_add_barrier(i, &v->counter);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add
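
/*
 * __atomic_add_barrier() returns the counter value *before* the addition.
 * arch_atomic_fetch_add() can therefore return it unchanged, while
 * arch_atomic_add_return() adds i once more to yield the new value.
 */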

static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
        __atomic_add(i, &v->counter);
}
#define arch_atomic_add arch_atomic_add

#define arch_atomic_sub(_i, _v)         arch_atomic_add(-(int)(_i), _v)
#define arch_atomic_sub_return(_i, _v)  arch_atomic_add_return(-(int)(_i), _v)
#define arch_atomic_fetch_sub(_i, _v)   arch_atomic_fetch_add(-(int)(_i), _v)
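
/*
 * There are no dedicated subtract paths: the sub variants negate the
 * operand and reuse the add operations.  The (int) cast makes the
 * negation operate on a signed value even for unsigned arguments.
 */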

#define ATOMIC_OPS(op)                                                  \
static __always_inline void arch_atomic_##op(int i, atomic_t *v)        \
{                                                                       \
        __atomic_##op(i, &v->counter);                                  \
}                                                                       \
static __always_inline int arch_atomic_fetch_##op(int i, atomic_t *v)   \
{                                                                       \
        return __atomic_##op##_barrier(i, &v->counter);                 \
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
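
/*
 * Each ATOMIC_OPS(op) line above stamps out two functions: a void
 * arch_atomic_<op>() without barrier semantics, and an
 * arch_atomic_fetch_<op>() that returns the old value and implies a full
 * barrier.  ATOMIC_OPS(and), for example, yields arch_atomic_and() and
 * arch_atomic_fetch_and().
 */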

#define arch_atomic_and                 arch_atomic_and
#define arch_atomic_or                  arch_atomic_or
#define arch_atomic_xor                 arch_atomic_xor
#define arch_atomic_fetch_and           arch_atomic_fetch_and
#define arch_atomic_fetch_or            arch_atomic_fetch_or
#define arch_atomic_fetch_xor           arch_atomic_fetch_xor

#define arch_atomic_xchg(v, new)        (arch_xchg(&((v)->counter), new))

static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
        return __atomic_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg
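
/*
 * arch_atomic_cmpxchg() stores new only if the counter still equals old,
 * and always returns the previous value.  A minimal usage sketch through
 * the generic atomic_cmpxchg() wrapper (illustrative only; the doubling
 * update is a made-up example, not part of this header):
 *
 *	int old = atomic_read(v);
 *	for (;;) {
 *		int prev = atomic_cmpxchg(v, old, old * 2);
 *		if (prev == old)
 *			break;		// swap succeeded
 *		old = prev;		// lost the race, retry with the new value
 *	}
 */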

#define ATOMIC64_INIT(i)  { (i) }
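
/*
 * The atomic64_t operations below mirror the 32-bit API one-to-one.  s390
 * is a 64-bit architecture, so long and s64 have the same width; the
 * (long *) casts merely adapt the s64 counter to the long-based
 * __atomic64_*() helpers from <asm/atomic_ops.h>.
 */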

static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
        return __atomic64_read(v);
}
#define arch_atomic64_read arch_atomic64_read

static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
        __atomic64_set(v, i);
}
#define arch_atomic64_set arch_atomic64_set

static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
        return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}
#define arch_atomic64_add_return arch_atomic64_add_return

static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
        return __atomic64_add_barrier(i, (long *)&v->counter);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
        __atomic64_add(i, (long *)&v->counter);
}
#define arch_atomic64_add arch_atomic64_add

#define arch_atomic64_xchg(v, new)      (arch_xchg(&((v)->counter), new))

static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
        return __atomic64_cmpxchg((long *)&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

#define ATOMIC64_OPS(op)                                                        \
static __always_inline void arch_atomic64_##op(s64 i, atomic64_t *v)            \
{                                                                               \
        __atomic64_##op(i, (long *)&v->counter);                                \
}                                                                               \
static __always_inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v)      \
{                                                                               \
        return __atomic64_##op##_barrier(i, (long *)&v->counter);               \
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

#define arch_atomic64_and               arch_atomic64_and
#define arch_atomic64_or                arch_atomic64_or
#define arch_atomic64_xor               arch_atomic64_xor
#define arch_atomic64_fetch_and         arch_atomic64_fetch_and
#define arch_atomic64_fetch_or          arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor         arch_atomic64_fetch_xor

#define arch_atomic64_sub_return(_i, _v) arch_atomic64_add_return(-(s64)(_i), _v)
#define arch_atomic64_fetch_sub(_i, _v)  arch_atomic64_fetch_add(-(s64)(_i), _v)
#define arch_atomic64_sub(_i, _v)        arch_atomic64_add(-(s64)(_i), _v)

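/*
 * Illustrative use through the generic <linux/atomic.h> wrappers (a sketch
 * only; bytes_rx and len are made-up names, not part of this header):
 *
 *	static atomic64_t bytes_rx = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &bytes_rx);		// backed by arch_atomic64_add()
 *	s64 total = atomic64_read(&bytes_rx);	// backed by arch_atomic64_read()
 */
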
#endif /* __ARCH_S390_ATOMIC__  */