TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/atomic.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 */

static __always_inline int arch_atomic_read(const atomic_t *v)
{
        /*
         * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
         * since it is a non-inlined function that increases binary size and
         * stack usage.
         */
        return __READ_ONCE((v)->counter);
}

static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
        __WRITE_ONCE(v->counter, i);
}
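
/*
 * Usage sketch (editor's note, not part of the original header): generic
 * kernel code normally reaches these primitives through the atomic_read()
 * and atomic_set() wrappers from <linux/atomic.h>, which layer KASAN/KCSAN
 * instrumentation on top of the arch_atomic_*() functions.  A minimal
 * counter might look like this; "nr_users" is a hypothetical variable:
 *
 *      atomic_t nr_users = ATOMIC_INIT(0);
 *
 *      atomic_set(&nr_users, 1);
 *      pr_info("users: %d\n", atomic_read(&nr_users));
 */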

static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
        asm volatile(LOCK_PREFIX "addl %1,%0"
                     : "+m" (v->counter)
                     : "ir" (i) : "memory");
}

static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
        asm volatile(LOCK_PREFIX "subl %1,%0"
                     : "+m" (v->counter)
                     : "ir" (i) : "memory");
}

static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
        return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test
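
/*
 * Usage sketch (editor's note): arch_atomic_sub_and_test() atomically
 * subtracts @i and reports whether the result became zero, which suits
 * resources that are released in units larger than one.  A hypothetical
 * example through the generic wrapper ("pool" and its fields are made up):
 *
 *      if (atomic_sub_and_test(nr_pages, &pool->inflight))
 *              wake_up(&pool->drain_wq);
 */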

static __always_inline void arch_atomic_inc(atomic_t *v)
{
        asm volatile(LOCK_PREFIX "incl %0"
                     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc

static __always_inline void arch_atomic_dec(atomic_t *v)
{
        asm volatile(LOCK_PREFIX "decl %0"
                     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec

static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
        return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test

static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
        return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test
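
/*
 * Usage sketch (editor's note): dec_and_test is the building block of the
 * classic reference-count release pattern: whichever caller drops the count
 * to zero is the one that tears the object down.  "struct foo" and
 * foo_destroy() below are hypothetical:
 *
 *      void foo_put(struct foo *f)
 *      {
 *              if (atomic_dec_and_test(&f->refcnt))
 *                      foo_destroy(f);
 *      }
 */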

static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
        return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative
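
/*
 * Usage sketch (editor's note): arch_atomic_add_negative() adds @i and tests
 * the sign of the result in a single locked instruction, so a caller can
 * notice a signed counter dropping below zero without a separate read.  A
 * hypothetical example ("budget" and the counter name are made up):
 *
 *      if (atomic_add_negative(-budget, &q->credits))
 *              throttle_queue(q);
 */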

static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
        return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return

#define arch_atomic_sub_return(i, v) arch_atomic_add_return(-(i), v)

static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
        return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

#define arch_atomic_fetch_sub(i, v) arch_atomic_fetch_add(-(i), v)
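
/*
 * Usage sketch (editor's note): both helpers above are built on the locked
 * XADD instruction and differ only in what they hand back:
 * arch_atomic_add_return() yields the *new* value (old + i), while
 * arch_atomic_fetch_add() yields the *old* value.  Hypothetical examples
 * through the generic wrappers:
 *
 *      int id    = atomic_fetch_add(1, &next_id);      // old value: 0, 1, 2, ...
 *      int users = atomic_add_return(1, &nr_active);   // value after the add
 */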

static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
        return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
        return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg

static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
        return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg
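
/*
 * Usage sketch (editor's note): try_cmpxchg() returns a bool and, on failure,
 * writes the value actually found in memory back into *old, so a
 * compare-and-swap retry loop needs no separate re-read.  A minimal
 * saturating increment might look like this (sat_inc() is a hypothetical
 * helper, not part of this header):
 *
 *      static inline void sat_inc(atomic_t *v)
 *      {
 *              int old = atomic_read(v);
 *
 *              do {
 *                      if (old == INT_MAX)
 *                              return;
 *              } while (!atomic_try_cmpxchg(v, &old, old + 1));
 *      }
 */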

static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
        asm volatile(LOCK_PREFIX "andl %1,%0"
                        : "+m" (v->counter)
                        : "ir" (i)
                        : "memory");
}

static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
        int val = arch_atomic_read(v);

        do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

        return val;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and
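
/*
 * Usage sketch (editor's note): x86 has no locked and/or/xor form that also
 * returns the previous value, so arch_atomic_fetch_and() above (and the
 * fetch_or/fetch_xor variants below) loop on try_cmpxchg until their update
 * wins.  A typical use is reading and clearing flag bits in one step;
 * FLAG_PENDING and the surrounding names are hypothetical:
 *
 *      int old_flags = atomic_fetch_and(~FLAG_PENDING, &st->flags);
 *
 *      if (old_flags & FLAG_PENDING)
 *              process_pending(st);
 */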

static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
        asm volatile(LOCK_PREFIX "orl %1,%0"
                        : "+m" (v->counter)
                        : "ir" (i)
                        : "memory");
}

static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
        int val = arch_atomic_read(v);

        do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

        return val;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or

static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
        asm volatile(LOCK_PREFIX "xorl %1,%0"
                        : "+m" (v->counter)
                        : "ir" (i)
                        : "memory");
}

static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
        int val = arch_atomic_read(v);

        do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

        return val;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif
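
/*
 * Editor's note: 64-bit atomic64_t operations come from the header selected
 * just above: native quadword instructions on 64-bit kernels, and an
 * emulated implementation (typically built around cmpxchg8b) on 32-bit
 * kernels.  Hypothetical usage through the generic wrappers:
 *
 *      atomic64_t bytes_sent = ATOMIC64_INIT(0);
 *
 *      atomic64_add(len, &bytes_sent);
 *      pr_debug("sent %lld bytes\n", atomic64_read(&bytes_sent));
 */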

#endif /* _ASM_X86_ATOMIC_H */
