
TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/atomic64_64.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)        { (i) }

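/*
 * On x86-64, naturally aligned 64-bit loads and stores are atomic, so
 * read/set need no LOCK prefix; __READ_ONCE()/__WRITE_ONCE() only keep
 * the compiler from tearing, caching or reordering the plain access.
 */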
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
        return __READ_ONCE((v)->counter);
}

static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
        __WRITE_ONCE(v->counter, i);
}

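/*
 * The arithmetic ops below are single locked instructions: LOCK_PREFIX
 * (from <asm/alternative.h>) emits the LOCK prefix so the read-modify-write
 * is atomic across CPUs.  The "er" constraint accepts a register or a
 * sign-extended 32-bit immediate; the "memory" clobber stops the compiler
 * from reordering other memory accesses around the atomic op.
 */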
static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "addq %1,%0"
                     : "=m" (v->counter)
                     : "er" (i), "m" (v->counter) : "memory");
}

static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "subq %1,%0"
                     : "=m" (v->counter)
                     : "er" (i), "m" (v->counter) : "memory");
}

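/*
 * GEN_UNARY_RMWcc()/GEN_BINARY_RMWcc() (<asm/rmwcc.h>) emit the locked
 * instruction and hand back the requested condition flag ("e" for ZF,
 * "s" for SF), so the *_and_test() and add_negative() variants need no
 * separate compare.
 */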
static __always_inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
        return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
}
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test

static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "incq %0"
                     : "=m" (v->counter)
                     : "m" (v->counter) : "memory");
}
#define arch_atomic64_inc arch_atomic64_inc

static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "decq %0"
                     : "=m" (v->counter)
                     : "m" (v->counter) : "memory");
}
#define arch_atomic64_dec arch_atomic64_dec

static __always_inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
        return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
}
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test

static __always_inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
        return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
}
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test

static __always_inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
{
        return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
}
#define arch_atomic64_add_negative arch_atomic64_add_negative

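/*
 * xadd() wraps LOCK XADDQ, which atomically adds and yields the value the
 * counter held before the addition: fetch_add() returns that old value
 * directly, add_return() adds 'i' once more to produce the new value.
 */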
static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
        return i + xadd(&v->counter, i);
}
#define arch_atomic64_add_return arch_atomic64_add_return

#define arch_atomic64_sub_return(i, v) arch_atomic64_add_return(-(i), v)

static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
        return xadd(&v->counter, i);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

#define arch_atomic64_fetch_sub(i, v) arch_atomic64_fetch_add(-(i), v)

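/*
 * cmpxchg/xchg are thin wrappers around arch_cmpxchg()/arch_xchg() from
 * <asm/cmpxchg.h> (LOCK CMPXCHGQ / XCHGQ, the latter implicitly locked).
 * arch_try_cmpxchg() additionally updates *old with the value found on
 * failure, which the fetch_{and,or,xor}() loops further below rely on.
 */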
static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
        return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
        return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg

static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
        return arch_xchg(&v->counter, new);
}
#define arch_atomic64_xchg arch_atomic64_xchg

static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "andq %1,%0"
                        : "+m" (v->counter)
                        : "er" (i)
                        : "memory");
}

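/*
 * x86 has no instruction that atomically ANDs/ORs/XORs memory and returns
 * the old value, so the fetch_{and,or,xor}() ops sample the counter and
 * loop on try_cmpxchg() until old OP i is installed; on failure
 * try_cmpxchg() refreshes 'val', hence the empty loop body.
 */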
static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
        s64 val = arch_atomic64_read(v);

        do {
        } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
        return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and

static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "orq %1,%0"
                        : "+m" (v->counter)
                        : "er" (i)
                        : "memory");
}

static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
        s64 val = arch_atomic64_read(v);

        do {
        } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
        return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "xorq %1,%0"
                        : "+m" (v->counter)
                        : "er" (i)
                        : "memory");
}

static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
        s64 val = arch_atomic64_read(v);

        do {
        } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
        return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

#endif /* _ASM_X86_ATOMIC64_64_H */
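
For context, a minimal usage sketch, not part of this header: kernel code normally reaches these operations through the generic atomic64_*() wrappers pulled in via <linux/atomic.h>, which resolve to the arch_atomic64_*() implementations above. The names sample_counter and sample_usage below are illustrative only.

#include <linux/atomic.h>
#include <linux/printk.h>

static atomic64_t sample_counter = ATOMIC64_INIT(0);    /* illustrative */

static void sample_usage(void)
{
        s64 now;

        atomic64_inc(&sample_counter);                   /* LOCK INCQ */
        atomic64_add(16, &sample_counter);               /* LOCK ADDQ */

        /* XADD-based add_return(): hands back the new value, here 1 */
        now = atomic64_add_return(-16, &sample_counter);

        if (atomic64_dec_and_test(&sample_counter))      /* LOCK DECQ, tests ZF */
                pr_info("sample_counter is back to zero (was %lld)\n", now);
}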
