
TOMOYO Linux Cross Reference
Linux/include/linux/atomic/atomic-arch-fallback.h


Diff markup

Differences between /include/linux/atomic/atomic-arch-fallback.h (Architecture mips) and /include/linux/atomic/atomic-arch-fallback.h (Architecture m68k): none. This header is generated once by scripts/atomic/gen-atomic-fallback.sh and is shared by all architectures, so the file is shown once below.


// SPDX-License-Identifier: GPL-2.0

// Generated by scripts/atomic/gen-atomic-fallback.sh
// DO NOT MODIFY THIS FILE DIRECTLY

#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H

#include <linux/compiler.h>

#if defined(arch_xchg)
#define raw_xchg arch_xchg
#elif defined(arch_xchg_relaxed)
#define raw_xchg(...) \
        __atomic_op_fence(arch_xchg, __VA_ARGS__)
#else
extern void raw_xchg_not_implemented(void);
#define raw_xchg(...) raw_xchg_not_implemented()
#endif

#if defined(arch_xchg_acquire)
#define raw_xchg_acquire arch_xchg_acquire
#elif defined(arch_xchg_relaxed)
#define raw_xchg_acquire(...) \
        __atomic_op_acquire(arch_xchg, __VA_ARGS__)
#elif defined(arch_xchg)
#define raw_xchg_acquire arch_xchg
#else
extern void raw_xchg_acquire_not_implemented(void);
#define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
#endif

#if defined(arch_xchg_release)
#define raw_xchg_release arch_xchg_release
#elif defined(arch_xchg_relaxed)
#define raw_xchg_release(...) \
        __atomic_op_release(arch_xchg, __VA_ARGS__)
#elif defined(arch_xchg)
#define raw_xchg_release arch_xchg
#else
extern void raw_xchg_release_not_implemented(void);
#define raw_xchg_release(...) raw_xchg_release_not_implemented()
#endif

#if defined(arch_xchg_relaxed)
#define raw_xchg_relaxed arch_xchg_relaxed
#elif defined(arch_xchg)
#define raw_xchg_relaxed arch_xchg
#else
extern void raw_xchg_relaxed_not_implemented(void);
#define raw_xchg_relaxed(...) raw_xchg_relaxed_not_implemented()
#endif
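
/*
 * For reference (not part of the generated header): the __atomic_op_*()
 * wrappers used above live in include/linux/atomic.h and paste "_relaxed"
 * onto the operation name, which is why the #elif branches only need the
 * arch_*_relaxed() form to exist. Sketched from memory along these lines;
 * check the tree for the authoritative definitions:
 *
 *	#define __atomic_op_acquire(op, args...)			\
 *	({								\
 *		typeof(op##_relaxed(args)) __ret = op##_relaxed(args);	\
 *		__atomic_acquire_fence();				\
 *		__ret;							\
 *	})
 *
 *	#define __atomic_op_fence(op, args...)				\
 *	({								\
 *		typeof(op##_relaxed(args)) __ret;			\
 *		__atomic_pre_full_fence();				\
 *		__ret = op##_relaxed(args);				\
 *		__atomic_post_full_fence();				\
 *		__ret;							\
 *	})
 */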

#if defined(arch_cmpxchg)
#define raw_cmpxchg arch_cmpxchg
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg(...) \
        __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
#else
extern void raw_cmpxchg_not_implemented(void);
#define raw_cmpxchg(...) raw_cmpxchg_not_implemented()
#endif

#if defined(arch_cmpxchg_acquire)
#define raw_cmpxchg_acquire arch_cmpxchg_acquire
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_acquire(...) \
        __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_acquire arch_cmpxchg
#else
extern void raw_cmpxchg_acquire_not_implemented(void);
#define raw_cmpxchg_acquire(...) raw_cmpxchg_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg_release)
#define raw_cmpxchg_release arch_cmpxchg_release
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_release(...) \
        __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_release arch_cmpxchg
#else
extern void raw_cmpxchg_release_not_implemented(void);
#define raw_cmpxchg_release(...) raw_cmpxchg_release_not_implemented()
#endif

#if defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_relaxed arch_cmpxchg_relaxed
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_relaxed arch_cmpxchg
#else
extern void raw_cmpxchg_relaxed_not_implemented(void);
#define raw_cmpxchg_relaxed(...) raw_cmpxchg_relaxed_not_implemented()
#endif
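
/*
 * Illustrative sketch (not part of the generated header): cmpxchg()
 * returns the value that was in *ptr before the operation, so success
 * is detected by comparing the return value against the expected old
 * value. example_claim_slot() is a hypothetical helper.
 */
static inline int example_claim_slot(int *slot)
{
        /* Atomically move the slot from 0 (free) to 1 (claimed). */
        return raw_cmpxchg(slot, 0, 1) == 0;
}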

#if defined(arch_cmpxchg64)
#define raw_cmpxchg64 arch_cmpxchg64
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64(...) \
        __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
#else
extern void raw_cmpxchg64_not_implemented(void);
#define raw_cmpxchg64(...) raw_cmpxchg64_not_implemented()
#endif

#if defined(arch_cmpxchg64_acquire)
#define raw_cmpxchg64_acquire arch_cmpxchg64_acquire
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_acquire(...) \
        __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_acquire arch_cmpxchg64
#else
extern void raw_cmpxchg64_acquire_not_implemented(void);
#define raw_cmpxchg64_acquire(...) raw_cmpxchg64_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg64_release)
#define raw_cmpxchg64_release arch_cmpxchg64_release
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_release(...) \
        __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_release arch_cmpxchg64
#else
extern void raw_cmpxchg64_release_not_implemented(void);
#define raw_cmpxchg64_release(...) raw_cmpxchg64_release_not_implemented()
#endif

#if defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_relaxed arch_cmpxchg64_relaxed
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_relaxed arch_cmpxchg64
#else
extern void raw_cmpxchg64_relaxed_not_implemented(void);
#define raw_cmpxchg64_relaxed(...) raw_cmpxchg64_relaxed_not_implemented()
#endif

#if defined(arch_cmpxchg128)
#define raw_cmpxchg128 arch_cmpxchg128
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128(...) \
        __atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
#else
extern void raw_cmpxchg128_not_implemented(void);
#define raw_cmpxchg128(...) raw_cmpxchg128_not_implemented()
#endif

#if defined(arch_cmpxchg128_acquire)
#define raw_cmpxchg128_acquire arch_cmpxchg128_acquire
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_acquire(...) \
        __atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__)
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_acquire arch_cmpxchg128
#else
extern void raw_cmpxchg128_acquire_not_implemented(void);
#define raw_cmpxchg128_acquire(...) raw_cmpxchg128_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg128_release)
#define raw_cmpxchg128_release arch_cmpxchg128_release
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_release(...) \
        __atomic_op_release(arch_cmpxchg128, __VA_ARGS__)
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_release arch_cmpxchg128
#else
extern void raw_cmpxchg128_release_not_implemented(void);
#define raw_cmpxchg128_release(...) raw_cmpxchg128_release_not_implemented()
#endif

#if defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_relaxed arch_cmpxchg128_relaxed
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_relaxed arch_cmpxchg128
#else
extern void raw_cmpxchg128_relaxed_not_implemented(void);
#define raw_cmpxchg128_relaxed(...) raw_cmpxchg128_relaxed_not_implemented()
#endif

#if defined(arch_try_cmpxchg)
#define raw_try_cmpxchg arch_try_cmpxchg
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg(...) \
        __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
#else
#define raw_try_cmpxchg(_ptr, _oldp, _new) \
({ \
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
        ___r = raw_cmpxchg((_ptr), ___o, (_new)); \
        if (unlikely(___r != ___o)) \
                *___op = ___r; \
        likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_acquire)
#define raw_try_cmpxchg_acquire arch_try_cmpxchg_acquire
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_acquire(...) \
        __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_acquire arch_try_cmpxchg
#else
#define raw_try_cmpxchg_acquire(_ptr, _oldp, _new) \
({ \
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
        ___r = raw_cmpxchg_acquire((_ptr), ___o, (_new)); \
        if (unlikely(___r != ___o)) \
                *___op = ___r; \
        likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_release)
#define raw_try_cmpxchg_release arch_try_cmpxchg_release
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_release(...) \
        __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_release arch_try_cmpxchg
#else
#define raw_try_cmpxchg_release(_ptr, _oldp, _new) \
({ \
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
        ___r = raw_cmpxchg_release((_ptr), ___o, (_new)); \
        if (unlikely(___r != ___o)) \
                *___op = ___r; \
        likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_relaxed arch_try_cmpxchg_relaxed
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_relaxed arch_try_cmpxchg
#else
#define raw_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
({ \
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
        ___r = raw_cmpxchg_relaxed((_ptr), ___o, (_new)); \
        if (unlikely(___r != ___o)) \
                *___op = ___r; \
        likely(___r == ___o); \
})
#endif
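
/*
 * Illustrative sketch (not part of the generated header): the canonical
 * retry loop for the try_cmpxchg() family. On failure the fallback above
 * stores the value it actually observed back through @_oldp, so the loop
 * does not need to re-read *v itself. example_inc_below() is a
 * hypothetical helper.
 */
static inline int example_inc_below(int *v, int limit)
{
        int old = READ_ONCE(*v);

        do {
                if (old >= limit)
                        return 0;       /* would exceed the limit */
        } while (!raw_try_cmpxchg(v, &old, old + 1));

        return 1;
}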

#if defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64 arch_try_cmpxchg64
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64(...) \
        __atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
#else
#define raw_try_cmpxchg64(_ptr, _oldp, _new) \
({ \
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
        ___r = raw_cmpxchg64((_ptr), ___o, (_new)); \
        if (unlikely(___r != ___o)) \
                *___op = ___r; \
        likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_acquire)
#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64_acquire
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_acquire(...) \
        __atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
({ \
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
        ___r = raw_cmpxchg64_acquire((_ptr), ___o, (_new)); \
        if (unlikely(___r != ___o)) \
                *___op = ___r; \
        likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_release)
#define raw_try_cmpxchg64_release arch_try_cmpxchg64_release
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_release(...) \
        __atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_release arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_release(_ptr, _oldp, _new) \
({ \
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
        ___r = raw_cmpxchg64_release((_ptr), ___o, (_new)); \
        if (unlikely(___r != ___o)) \
                *___op = ___r; \
        likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64_relaxed
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
({ \
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
        ___r = raw_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
        if (unlikely(___r != ___o)) \
                *___op = ___r; \
        likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128 arch_try_cmpxchg128
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128(...) \
        __atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
#else
#define raw_try_cmpxchg128(_ptr, _oldp, _new) \
({ \
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
        ___r = raw_cmpxchg128((_ptr), ___o, (_new)); \
        if (unlikely(___r != ___o)) \
                *___op = ___r; \
        likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_acquire)
#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128_acquire
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_acquire(...) \
        __atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
({ \
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
        ___r = raw_cmpxchg128_acquire((_ptr), ___o, (_new)); \
        if (unlikely(___r != ___o)) \
                *___op = ___r; \
        likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_release)
#define raw_try_cmpxchg128_release arch_try_cmpxchg128_release
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_release(...) \
        __atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_release arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_release(_ptr, _oldp, _new) \
({ \
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
        ___r = raw_cmpxchg128_release((_ptr), ___o, (_new)); \
        if (unlikely(___r != ___o)) \
                *___op = ___r; \
        likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128_relaxed
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
({ \
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
        ___r = raw_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
        if (unlikely(___r != ___o)) \
                *___op = ___r; \
        likely(___r == ___o); \
})
#endif

#define raw_cmpxchg_local arch_cmpxchg_local

#ifdef arch_try_cmpxchg_local
#define raw_try_cmpxchg_local arch_try_cmpxchg_local
#else
#define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
({ \
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
        ___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
        if (unlikely(___r != ___o)) \
                *___op = ___r; \
        likely(___r == ___o); \
})
#endif

#define raw_cmpxchg64_local arch_cmpxchg64_local

#ifdef arch_try_cmpxchg64_local
#define raw_try_cmpxchg64_local arch_try_cmpxchg64_local
#else
#define raw_try_cmpxchg64_local(_ptr, _oldp, _new) \
({ \
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
        ___r = raw_cmpxchg64_local((_ptr), ___o, (_new)); \
        if (unlikely(___r != ___o)) \
                *___op = ___r; \
        likely(___r == ___o); \
})
#endif

#define raw_cmpxchg128_local arch_cmpxchg128_local

#ifdef arch_try_cmpxchg128_local
#define raw_try_cmpxchg128_local arch_try_cmpxchg128_local
#else
#define raw_try_cmpxchg128_local(_ptr, _oldp, _new) \
({ \
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
        ___r = raw_cmpxchg128_local((_ptr), ___o, (_new)); \
        if (unlikely(___r != ___o)) \
                *___op = ___r; \
        likely(___r == ___o); \
})
#endif

#define raw_sync_cmpxchg arch_sync_cmpxchg

#ifdef arch_sync_try_cmpxchg
#define raw_sync_try_cmpxchg arch_sync_try_cmpxchg
#else
#define raw_sync_try_cmpxchg(_ptr, _oldp, _new) \
({ \
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
        ___r = raw_sync_cmpxchg((_ptr), ___o, (_new)); \
        if (unlikely(___r != ___o)) \
                *___op = ___r; \
        likely(___r == ___o); \
})
#endif
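
/*
 * Illustrative sketch (not part of the generated header): the _local
 * variants are only guaranteed atomic with respect to the current CPU,
 * which makes them cheaper for data that is never modified cross-CPU;
 * the sync_ variant keeps full cross-agent atomicity even where plain
 * ops could be weaker (hedged: exact guarantees are architecture
 * specific). example_local_bump() is a hypothetical helper.
 */
static inline int example_local_bump(int *cpu_private_counter)
{
        int old = READ_ONCE(*cpu_private_counter);

        /* Safe against interrupts on this CPU, not against other CPUs. */
        while (!raw_try_cmpxchg_local(cpu_private_counter, &old, old + 1))
                ;
        return old + 1;
}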

/**
 * raw_atomic_read() - atomic load with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically loads the value of @v with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_read() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline int
raw_atomic_read(const atomic_t *v)
{
        return arch_atomic_read(v);
}

/**
 * raw_atomic_read_acquire() - atomic load with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically loads the value of @v with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_read_acquire() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline int
raw_atomic_read_acquire(const atomic_t *v)
{
#if defined(arch_atomic_read_acquire)
        return arch_atomic_read_acquire(v);
#else
        int ret;

        if (__native_word(atomic_t)) {
                ret = smp_load_acquire(&(v)->counter);
        } else {
                ret = raw_atomic_read(v);
                __atomic_acquire_fence();
        }

        return ret;
#endif
}

/**
 * raw_atomic_set() - atomic set with relaxed ordering
 * @v: pointer to atomic_t
 * @i: int value to assign
 *
 * Atomically sets @v to @i with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_set() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_set(atomic_t *v, int i)
{
        arch_atomic_set(v, i);
}

/**
 * raw_atomic_set_release() - atomic set with release ordering
 * @v: pointer to atomic_t
 * @i: int value to assign
 *
 * Atomically sets @v to @i with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_set_release() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_set_release(atomic_t *v, int i)
{
#if defined(arch_atomic_set_release)
        arch_atomic_set_release(v, i);
#else
        if (__native_word(atomic_t)) {
                smp_store_release(&(v)->counter, i);
        } else {
                __atomic_release_fence();
                raw_atomic_set(v, i);
        }
#endif
}
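
/*
 * Illustrative sketch (not part of the generated header): set_release()
 * pairs with read_acquire() above to form the classic message-passing
 * pattern. A consumer that observes the flag set with acquire ordering
 * is guaranteed to observe the payload written before the release store.
 * The example_* helpers and example_payload are hypothetical.
 */
static int example_payload;

static inline void example_publish(atomic_t *flag)
{
        example_payload = 42;            /* write the payload first */
        raw_atomic_set_release(flag, 1); /* then publish the flag */
}

static inline int example_consume(atomic_t *flag)
{
        if (raw_atomic_read_acquire(flag)) /* pairs with the release */
                return example_payload;    /* guaranteed to read 42 */
        return -1;
}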

/**
 * raw_atomic_add() - atomic add with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_add(int i, atomic_t *v)
{
        arch_atomic_add(i, v);
}

/**
 * raw_atomic_add_return() - atomic add with full ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return)
        return arch_atomic_add_return(i, v);
#elif defined(arch_atomic_add_return_relaxed)
        int ret;
        __atomic_pre_full_fence();
        ret = arch_atomic_add_return_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
#else
#error "Unable to define raw_atomic_add_return"
#endif
}
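
/*
 * Illustrative sketch (not part of the generated header): add_return()
 * yields the updated value, whereas fetch_add() (documented further
 * below) yields the value prior to the update; the unsuffixed forms of
 * both are fully ordered. example_return_conventions() is hypothetical.
 */
static inline void example_return_conventions(atomic_t *v)
{
        int new_val;

        raw_atomic_set(v, 0);
        new_val = raw_atomic_add_return(1, v);  /* v == 1, new_val == 1 */
        /* raw_atomic_fetch_add(1, v) here would return 1, leaving v == 2. */
        (void)new_val;
}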

/**
 * raw_atomic_add_return_acquire() - atomic add with acquire ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_acquire)
        return arch_atomic_add_return_acquire(i, v);
#elif defined(arch_atomic_add_return_relaxed)
        int ret = arch_atomic_add_return_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic_add_return)
        return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_acquire"
#endif
}

/**
 * raw_atomic_add_return_release() - atomic add with release ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return_release(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_release)
        return arch_atomic_add_return_release(i, v);
#elif defined(arch_atomic_add_return_relaxed)
        __atomic_release_fence();
        return arch_atomic_add_return_relaxed(i, v);
#elif defined(arch_atomic_add_return)
        return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_release"
#endif
}

/**
 * raw_atomic_add_return_relaxed() - atomic add with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_relaxed)
        return arch_atomic_add_return_relaxed(i, v);
#elif defined(arch_atomic_add_return)
        return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_relaxed"
#endif
}
651                                                   651 
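/*
 * Illustrative sketch (not part of the generated file, names are
 * hypothetical): the add_return forms hand back the post-add value,
 * so a threshold test needs no second read of the counter.
 */
static __always_inline int example_charge(atomic_t *usage, int amount, int limit)
{
        return raw_atomic_add_return(amount, usage) > limit;    /* 1 if over limit */
}
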
/**
 * raw_atomic_fetch_add() - atomic add with full ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add)
        return arch_atomic_fetch_add(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
        int ret;
        __atomic_pre_full_fence();
        ret = arch_atomic_fetch_add_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
#else
#error "Unable to define raw_atomic_fetch_add"
#endif
}

/**
 * raw_atomic_fetch_add_acquire() - atomic add with acquire ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_acquire)
        return arch_atomic_fetch_add_acquire(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
        int ret = arch_atomic_fetch_add_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic_fetch_add)
        return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_acquire"
#endif
}

/**
 * raw_atomic_fetch_add_release() - atomic add with release ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_release)
        return arch_atomic_fetch_add_release(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
        __atomic_release_fence();
        return arch_atomic_fetch_add_relaxed(i, v);
#elif defined(arch_atomic_fetch_add)
        return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_release"
#endif
}

/**
 * raw_atomic_fetch_add_relaxed() - atomic add with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_relaxed)
        return arch_atomic_fetch_add_relaxed(i, v);
#elif defined(arch_atomic_fetch_add)
        return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_relaxed"
#endif
}

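/*
 * Illustrative sketch (not part of the generated file, names are
 * hypothetical): a ticket dispenser. In contrast to add_return, the
 * fetch_add forms return the value *before* the addition, so with
 * @next initialized to ATOMIC_INIT(0) the first caller gets ticket 0.
 */
static __always_inline int example_take_ticket(atomic_t *next)
{
        return raw_atomic_fetch_add(1, next);
}
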
/**
 * raw_atomic_sub() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_sub(int i, atomic_t *v)
{
        arch_atomic_sub(i, v);
}

/**
 * raw_atomic_sub_return() - atomic subtract with full ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return)
        return arch_atomic_sub_return(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
        int ret;
        __atomic_pre_full_fence();
        ret = arch_atomic_sub_return_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
#else
#error "Unable to define raw_atomic_sub_return"
#endif
}

/**
 * raw_atomic_sub_return_acquire() - atomic subtract with acquire ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_acquire)
        return arch_atomic_sub_return_acquire(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
        int ret = arch_atomic_sub_return_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic_sub_return)
        return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_acquire"
#endif
}

/**
 * raw_atomic_sub_return_release() - atomic subtract with release ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_release(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_release)
        return arch_atomic_sub_return_release(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
        __atomic_release_fence();
        return arch_atomic_sub_return_relaxed(i, v);
#elif defined(arch_atomic_sub_return)
        return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_release"
#endif
}

/**
 * raw_atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_relaxed)
        return arch_atomic_sub_return_relaxed(i, v);
#elif defined(arch_atomic_sub_return)
        return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_relaxed"
#endif
}

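/*
 * Illustrative sketch (not part of the generated file, names are
 * hypothetical): releasing @n units of a budget; the post-subtraction
 * value from raw_atomic_sub_return() shows when usage drains to zero.
 */
static __always_inline int example_uncharge(atomic_t *usage, int n)
{
        return raw_atomic_sub_return(n, usage) == 0;    /* 1 when fully drained */
}
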
/**
 * raw_atomic_fetch_sub() - atomic subtract with full ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub)
        return arch_atomic_fetch_sub(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
        int ret;
        __atomic_pre_full_fence();
        ret = arch_atomic_fetch_sub_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
#else
#error "Unable to define raw_atomic_fetch_sub"
#endif
}

/**
 * raw_atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_acquire)
        return arch_atomic_fetch_sub_acquire(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
        int ret = arch_atomic_fetch_sub_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic_fetch_sub)
        return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_acquire"
#endif
}

/**
 * raw_atomic_fetch_sub_release() - atomic subtract with release ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_release)
        return arch_atomic_fetch_sub_release(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
        __atomic_release_fence();
        return arch_atomic_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic_fetch_sub)
        return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_release"
#endif
}

/**
 * raw_atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_relaxed)
        return arch_atomic_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic_fetch_sub)
        return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_relaxed"
#endif
}

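/*
 * Illustrative sketch (not part of the generated file, names are
 * hypothetical): consuming from a credit pool. The fetch_sub forms
 * return the pre-subtraction value, so the caller can tell whether the
 * pool covered the request (a real pool would also handle underflow).
 */
static __always_inline int example_consume_credit(atomic_t *credit, int n)
{
        return raw_atomic_fetch_sub(n, credit) >= n;    /* 1 if fully covered */
}
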
/**
 * raw_atomic_inc() - atomic increment with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_inc(atomic_t *v)
{
#if defined(arch_atomic_inc)
        arch_atomic_inc(v);
#else
        raw_atomic_add(1, v);
#endif
}

/**
 * raw_atomic_inc_return() - atomic increment with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return(atomic_t *v)
{
#if defined(arch_atomic_inc_return)
        return arch_atomic_inc_return(v);
#elif defined(arch_atomic_inc_return_relaxed)
        int ret;
        __atomic_pre_full_fence();
        ret = arch_atomic_inc_return_relaxed(v);
        __atomic_post_full_fence();
        return ret;
#else
        return raw_atomic_add_return(1, v);
#endif
}

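/*
 * Illustrative note (not part of the generated file): unlike the
 * add/sub ladders, which end in #error, the inc/dec ladders terminate
 * in a raw add/sub call with a constant 1, so every inc/dec variant is
 * always defined. A hypothetical get-style helper:
 */
static __always_inline int example_ref_get(atomic_t *refs)
{
        return raw_atomic_inc_return(refs);     /* post-increment count */
}
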
/**
 * raw_atomic_inc_return_acquire() - atomic increment with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return_acquire(atomic_t *v)
{
#if defined(arch_atomic_inc_return_acquire)
        return arch_atomic_inc_return_acquire(v);
#elif defined(arch_atomic_inc_return_relaxed)
        int ret = arch_atomic_inc_return_relaxed(v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic_inc_return)
        return arch_atomic_inc_return(v);
#else
        return raw_atomic_add_return_acquire(1, v);
#endif
}

/**
 * raw_atomic_inc_return_release() - atomic increment with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return_release(atomic_t *v)
{
#if defined(arch_atomic_inc_return_release)
        return arch_atomic_inc_return_release(v);
#elif defined(arch_atomic_inc_return_relaxed)
        __atomic_release_fence();
        return arch_atomic_inc_return_relaxed(v);
#elif defined(arch_atomic_inc_return)
        return arch_atomic_inc_return(v);
#else
        return raw_atomic_add_return_release(1, v);
#endif
}

/**
 * raw_atomic_inc_return_relaxed() - atomic increment with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return_relaxed(atomic_t *v)
{
#if defined(arch_atomic_inc_return_relaxed)
        return arch_atomic_inc_return_relaxed(v);
#elif defined(arch_atomic_inc_return)
        return arch_atomic_inc_return(v);
#else
        return raw_atomic_add_return_relaxed(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc() - atomic increment with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc)
        return arch_atomic_fetch_inc(v);
#elif defined(arch_atomic_fetch_inc_relaxed)
        int ret;
        __atomic_pre_full_fence();
        ret = arch_atomic_fetch_inc_relaxed(v);
        __atomic_post_full_fence();
        return ret;
#else
        return raw_atomic_fetch_add(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc_acquire() - atomic increment with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc_acquire(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc_acquire)
        return arch_atomic_fetch_inc_acquire(v);
#elif defined(arch_atomic_fetch_inc_relaxed)
        int ret = arch_atomic_fetch_inc_relaxed(v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic_fetch_inc)
        return arch_atomic_fetch_inc(v);
#else
        return raw_atomic_fetch_add_acquire(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc_release() - atomic increment with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc_release(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc_release)
        return arch_atomic_fetch_inc_release(v);
#elif defined(arch_atomic_fetch_inc_relaxed)
        __atomic_release_fence();
        return arch_atomic_fetch_inc_relaxed(v);
#elif defined(arch_atomic_fetch_inc)
        return arch_atomic_fetch_inc(v);
#else
        return raw_atomic_fetch_add_release(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc_relaxed(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc_relaxed)
        return arch_atomic_fetch_inc_relaxed(v);
#elif defined(arch_atomic_fetch_inc)
        return arch_atomic_fetch_inc(v);
#else
        return raw_atomic_fetch_add_relaxed(1, v);
#endif
}

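/*
 * Illustrative sketch (not part of the generated file, names are
 * hypothetical): stamping events with an ID. Nothing orders against
 * the counter itself, so the relaxed form suffices and is cheapest.
 */
static __always_inline int example_next_event_id(atomic_t *ctr)
{
        return raw_atomic_fetch_inc_relaxed(ctr);
}
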
/**
 * raw_atomic_dec() - atomic decrement with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_dec(atomic_t *v)
{
#if defined(arch_atomic_dec)
        arch_atomic_dec(v);
#else
        raw_atomic_sub(1, v);
#endif
}

/**
 * raw_atomic_dec_return() - atomic decrement with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return(atomic_t *v)
{
#if defined(arch_atomic_dec_return)
        return arch_atomic_dec_return(v);
#elif defined(arch_atomic_dec_return_relaxed)
        int ret;
        __atomic_pre_full_fence();
        ret = arch_atomic_dec_return_relaxed(v);
        __atomic_post_full_fence();
        return ret;
#else
        return raw_atomic_sub_return(1, v);
#endif
}

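/*
 * Illustrative sketch (not part of the generated file, names are
 * hypothetical): the classic put-side pattern. Full ordering on
 * raw_atomic_dec_return() keeps all prior accesses to the object
 * before the decrement that may drop the count to zero.
 */
static __always_inline int example_ref_put(atomic_t *refs)
{
        return raw_atomic_dec_return(refs) == 0;        /* 1 => caller frees */
}
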
1242 /**                                              1242 /**
1243  * raw_atomic_dec_return_acquire() - atomic d    1243  * raw_atomic_dec_return_acquire() - atomic decrement with acquire ordering
1244  * @v: pointer to atomic_t                       1244  * @v: pointer to atomic_t
1245  *                                               1245  *
1246  * Atomically updates @v to (@v - 1) with acq    1246  * Atomically updates @v to (@v - 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return_acquire(atomic_t *v)
{
#if defined(arch_atomic_dec_return_acquire)
        return arch_atomic_dec_return_acquire(v);
#elif defined(arch_atomic_dec_return_relaxed)
        int ret = arch_atomic_dec_return_relaxed(v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic_dec_return)
        return arch_atomic_dec_return(v);
#else
        return raw_atomic_sub_return_acquire(1, v);
#endif
}

/**
 * raw_atomic_dec_return_release() - atomic decrement with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return_release(atomic_t *v)
{
#if defined(arch_atomic_dec_return_release)
        return arch_atomic_dec_return_release(v);
#elif defined(arch_atomic_dec_return_relaxed)
        __atomic_release_fence();
        return arch_atomic_dec_return_relaxed(v);
#elif defined(arch_atomic_dec_return)
        return arch_atomic_dec_return(v);
#else
        return raw_atomic_sub_return_release(1, v);
#endif
}
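
/*
 * Editorial usage sketch, not part of the generated header: a minimal
 * reference-count "put" built on raw_atomic_dec_return_release(). The
 * example_ref_put() helper is hypothetical. Release ordering on the
 * decrement publishes this thread's prior writes to the object; the
 * acquire fence on the zero path lets the last putter observe every
 * other putter's writes before teardown, mirroring the refcount_t idiom.
 */
static __always_inline int
example_ref_put(atomic_t *example_ref)
{
        if (raw_atomic_dec_return_release(example_ref) == 0) {
                __atomic_acquire_fence();
                return 1;       /* caller may now free the object */
        }
        return 0;
}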

/**
 * raw_atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return_relaxed(atomic_t *v)
{
#if defined(arch_atomic_dec_return_relaxed)
        return arch_atomic_dec_return_relaxed(v);
#elif defined(arch_atomic_dec_return)
        return arch_atomic_dec_return(v);
#else
        return raw_atomic_sub_return_relaxed(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec() - atomic decrement with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec)
        return arch_atomic_fetch_dec(v);
#elif defined(arch_atomic_fetch_dec_relaxed)
        int ret;
        __atomic_pre_full_fence();
        ret = arch_atomic_fetch_dec_relaxed(v);
        __atomic_post_full_fence();
        return ret;
#else
        return raw_atomic_fetch_sub(1, v);
#endif
}
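
/*
 * Editorial note, not part of the generated header: the fetch_*() forms
 * return the value @v held *before* the update, while the *_return()
 * forms return the value *after* it. Hypothetical illustration:
 */
static __always_inline void
example_fetch_vs_return(atomic_t *v)
{
        int old = raw_atomic_fetch_dec(v);      /* value before this decrement */
        int new = raw_atomic_dec_return(v);     /* value after a second decrement */

        (void)old;      /* absent other writers, old == new + 2 */
        (void)new;
}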

/**
 * raw_atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec_acquire(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec_acquire)
        return arch_atomic_fetch_dec_acquire(v);
#elif defined(arch_atomic_fetch_dec_relaxed)
        int ret = arch_atomic_fetch_dec_relaxed(v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic_fetch_dec)
        return arch_atomic_fetch_dec(v);
#else
        return raw_atomic_fetch_sub_acquire(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec_release() - atomic decrement with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec_release(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec_release)
        return arch_atomic_fetch_dec_release(v);
#elif defined(arch_atomic_fetch_dec_relaxed)
        __atomic_release_fence();
        return arch_atomic_fetch_dec_relaxed(v);
#elif defined(arch_atomic_fetch_dec)
        return arch_atomic_fetch_dec(v);
#else
        return raw_atomic_fetch_sub_release(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec_relaxed(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec_relaxed)
        return arch_atomic_fetch_dec_relaxed(v);
#elif defined(arch_atomic_fetch_dec)
        return arch_atomic_fetch_dec(v);
#else
        return raw_atomic_fetch_sub_relaxed(1, v);
#endif
}

/**
 * raw_atomic_and() - atomic bitwise AND with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_and() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_and(int i, atomic_t *v)
{
        arch_atomic_and(i, v);
}
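
/*
 * Editorial usage sketch, not generated: the void AND is typically used
 * to clear every bit outside a mask. EXAMPLE_STATE_MASK and the helper
 * are hypothetical.
 */
#define EXAMPLE_STATE_MASK 0x000000ff
static __always_inline void
example_keep_state_bits(atomic_t *v)
{
        raw_atomic_and(EXAMPLE_STATE_MASK, v);  /* v &= 0xff, atomically */
}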

/**
 * raw_atomic_fetch_and() - atomic bitwise AND with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and)
        return arch_atomic_fetch_and(i, v);
#elif defined(arch_atomic_fetch_and_relaxed)
        int ret;
        __atomic_pre_full_fence();
        ret = arch_atomic_fetch_and_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
#else
#error "Unable to define raw_atomic_fetch_and"
#endif
}

/**
 * raw_atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and_acquire)
        return arch_atomic_fetch_and_acquire(i, v);
#elif defined(arch_atomic_fetch_and_relaxed)
        int ret = arch_atomic_fetch_and_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic_fetch_and)
        return arch_atomic_fetch_and(i, v);
#else
#error "Unable to define raw_atomic_fetch_and_acquire"
#endif
}

/**
 * raw_atomic_fetch_and_release() - atomic bitwise AND with release ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and_release)
        return arch_atomic_fetch_and_release(i, v);
#elif defined(arch_atomic_fetch_and_relaxed)
        __atomic_release_fence();
        return arch_atomic_fetch_and_relaxed(i, v);
#elif defined(arch_atomic_fetch_and)
        return arch_atomic_fetch_and(i, v);
#else
#error "Unable to define raw_atomic_fetch_and_release"
#endif
}

/**
 * raw_atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and_relaxed)
        return arch_atomic_fetch_and_relaxed(i, v);
#elif defined(arch_atomic_fetch_and)
        return arch_atomic_fetch_and(i, v);
#else
#error "Unable to define raw_atomic_fetch_and_relaxed"
#endif
}

/**
 * raw_atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_andnot() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_andnot(int i, atomic_t *v)
{
#if defined(arch_atomic_andnot)
        arch_atomic_andnot(i, v);
#else
        raw_atomic_and(~i, v);
#endif
}
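
/*
 * Editorial note, not generated: as the fallback above shows,
 * andnot(i, v) is exactly and(~i, v); it exists as its own operation
 * because some architectures implement it directly (e.g. a BIC-style
 * instruction). Hypothetical flag-clearing helper:
 */
#define EXAMPLE_FLAG_BUSY 0x1
static __always_inline void
example_clear_busy(atomic_t *flags)
{
        raw_atomic_andnot(EXAMPLE_FLAG_BUSY, flags);    /* flags &= ~BUSY */
}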

/**
 * raw_atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_andnot() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_andnot(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot)
        return arch_atomic_fetch_andnot(i, v);
#elif defined(arch_atomic_fetch_andnot_relaxed)
        int ret;
        __atomic_pre_full_fence();
        ret = arch_atomic_fetch_andnot_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
#else
        return raw_atomic_fetch_and(~i, v);
#endif
}

/**
 * raw_atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_andnot_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot_acquire)
        return arch_atomic_fetch_andnot_acquire(i, v);
#elif defined(arch_atomic_fetch_andnot_relaxed)
        int ret = arch_atomic_fetch_andnot_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic_fetch_andnot)
        return arch_atomic_fetch_andnot(i, v);
#else
        return raw_atomic_fetch_and_acquire(~i, v);
#endif
}
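
/*
 * Editorial usage sketch, not generated: fetch_andnot_acquire() gives an
 * atomic "test and clear with acquire" idiom, e.g. consuming a pending-
 * work flag so the work performed afterwards is ordered after observing
 * the flag. EXAMPLE_FLAG_PENDING and the helper are hypothetical.
 */
#define EXAMPLE_FLAG_PENDING 0x2
static __always_inline int
example_take_pending(atomic_t *flags)
{
        int old = raw_atomic_fetch_andnot_acquire(EXAMPLE_FLAG_PENDING, flags);

        return old & EXAMPLE_FLAG_PENDING;      /* nonzero iff we cleared it */
}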

/**
 * raw_atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_andnot_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_andnot_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot_release)
        return arch_atomic_fetch_andnot_release(i, v);
#elif defined(arch_atomic_fetch_andnot_relaxed)
        __atomic_release_fence();
        return arch_atomic_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic_fetch_andnot)
        return arch_atomic_fetch_andnot(i, v);
#else
        return raw_atomic_fetch_and_release(~i, v);
#endif
}

/**
 * raw_atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_andnot_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot_relaxed)
        return arch_atomic_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic_fetch_andnot)
        return arch_atomic_fetch_andnot(i, v);
#else
        return raw_atomic_fetch_and_relaxed(~i, v);
#endif
}

/**
 * raw_atomic_or() - atomic bitwise OR with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_or() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_or(int i, atomic_t *v)
{
        arch_atomic_or(i, v);
}
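
/*
 * Editorial usage sketch, not generated: the void OR sets bits when the
 * old value is not needed, e.g. marking a hypothetical DIRTY flag:
 */
#define EXAMPLE_FLAG_DIRTY 0x4
static __always_inline void
example_mark_dirty(atomic_t *flags)
{
        raw_atomic_or(EXAMPLE_FLAG_DIRTY, flags);       /* flags |= DIRTY */
}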

/**
 * raw_atomic_fetch_or() - atomic bitwise OR with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_or() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_or(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_or)
        return arch_atomic_fetch_or(i, v);
#elif defined(arch_atomic_fetch_or_relaxed)
        int ret;
        __atomic_pre_full_fence();
        ret = arch_atomic_fetch_or_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
#else
#error "Unable to define raw_atomic_fetch_or"
#endif
}
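
/*
 * Editorial usage sketch, not generated: fetch_or() is an atomic
 * "test and set bit"; the returned old value tells the caller whether it
 * was the one to set the bit. Names below are hypothetical.
 */
#define EXAMPLE_FLAG_CLAIMED 0x8
static __always_inline int
example_try_claim(atomic_t *flags)
{
        int old = raw_atomic_fetch_or(EXAMPLE_FLAG_CLAIMED, flags);

        return !(old & EXAMPLE_FLAG_CLAIMED);   /* 1 if this caller claimed it */
}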

/**
 * raw_atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_or_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_or_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_or_acquire)
        return arch_atomic_fetch_or_acquire(i, v);
#elif defined(arch_atomic_fetch_or_relaxed)
        int ret = arch_atomic_fetch_or_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic_fetch_or)
        return arch_atomic_fetch_or(i, v);
#else
#error "Unable to define raw_atomic_fetch_or_acquire"
#endif
}

/**
 * raw_atomic_fetch_or_release() - atomic bitwise OR with release ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_or_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_or_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_or_release)
        return arch_atomic_fetch_or_release(i, v);
#elif defined(arch_atomic_fetch_or_relaxed)
        __atomic_release_fence();
        return arch_atomic_fetch_or_relaxed(i, v);
#elif defined(arch_atomic_fetch_or)
        return arch_atomic_fetch_or(i, v);
#else
#error "Unable to define raw_atomic_fetch_or_release"
#endif
}

/**
 * raw_atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_or_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_or_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_or_relaxed)
        return arch_atomic_fetch_or_relaxed(i, v);
#elif defined(arch_atomic_fetch_or)
        return arch_atomic_fetch_or(i, v);
#else
#error "Unable to define raw_atomic_fetch_or_relaxed"
#endif
}
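
/*
 * Editorial note, not generated: the _relaxed forms provide atomicity
 * only, with no ordering guarantees; they suit statistics-style updates
 * where ordering is irrelevant or supplied by surrounding barriers.
 * Hypothetical helper recording which events have been seen:
 */
static __always_inline int
example_record_seen(atomic_t *seen_mask, int event_bit)
{
        return raw_atomic_fetch_or_relaxed(event_bit, seen_mask);       /* old mask */
}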

/**
 * raw_atomic_xor() - atomic bitwise XOR with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_xor() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_xor(int i, atomic_t *v)
{
        arch_atomic_xor(i, v);
}
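
/*
 * Editorial usage sketch, not generated: XOR toggles bits, and applying
 * the same XOR twice restores the original value. Hypothetical
 * parity-flip helper:
 */
#define EXAMPLE_FLAG_PARITY 0x10
static __always_inline void
example_flip_parity(atomic_t *v)
{
        raw_atomic_xor(EXAMPLE_FLAG_PARITY, v); /* v ^= PARITY */
}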

/**
 * raw_atomic_fetch_xor() - atomic bitwise XOR with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_xor() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_xor(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_xor)
        return arch_atomic_fetch_xor(i, v);
#elif defined(arch_atomic_fetch_xor_relaxed)
        int ret;
        __atomic_pre_full_fence();
        ret = arch_atomic_fetch_xor_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
#else
#error "Unable to define raw_atomic_fetch_xor"
#endif
}

/**
 * raw_atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_xor_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_xor_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_xor_acquire)
        return arch_atomic_fetch_xor_acquire(i, v);
#elif defined(arch_atomic_fetch_xor_relaxed)
        int ret = arch_atomic_fetch_xor_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic_fetch_xor)
        return arch_atomic_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic_fetch_xor_acquire"
#endif
}

/**
 * raw_atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_xor_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_xor_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_xor_release)
        return arch_atomic_fetch_xor_release(i, v);
#elif defined(arch_atomic_fetch_xor_relaxed)
        __atomic_release_fence();
        return arch_atomic_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic_fetch_xor)
        return arch_atomic_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic_fetch_xor_release"
#endif
}

/**
 * raw_atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_xor_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_xor_relaxed)
        return arch_atomic_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic_fetch_xor)
        return arch_atomic_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic_fetch_xor_relaxed"
#endif
}

/**
 * raw_atomic_xchg() - atomic exchange with full ordering
 * @v: pointer to atomic_t
 * @new: int value to assign
 *
 * Atomically updates @v to @new with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_xchg() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_xchg(atomic_t *v, int new)
{
#if defined(arch_atomic_xchg)
        return arch_atomic_xchg(v, new);
#elif defined(arch_atomic_xchg_relaxed)
        int ret;
        __atomic_pre_full_fence();
        ret = arch_atomic_xchg_relaxed(v, new);
        __atomic_post_full_fence();
        return ret;
#else
        return raw_xchg(&v->counter, new);
#endif
}
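
/*
 * Editorial usage sketch, not generated: a common xchg() pattern drains
 * a value atomically, e.g. collecting and resetting a hypothetical event
 * counter in one fully ordered step:
 */
static __always_inline int
example_drain_events(atomic_t *nr_events)
{
        return raw_atomic_xchg(nr_events, 0);   /* old count; counter is now 0 */
}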

/**
 * raw_atomic_xchg_acquire() - atomic exchange with acquire ordering
 * @v: pointer to atomic_t
 * @new: int value to assign
 *
 * Atomically updates @v to @new with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_xchg_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_xchg_acquire(atomic_t *v, int new)
{
#if defined(arch_atomic_xchg_acquire)
        return arch_atomic_xchg_acquire(v, new);
#elif defined(arch_atomic_xchg_relaxed)
        int ret = arch_atomic_xchg_relaxed(v, new);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic_xchg)
        return arch_atomic_xchg(v, new);
#else
        return raw_xchg_acquire(&v->counter, new);
#endif
}
1951                                                  1951 
/**
 * raw_atomic_xchg_release() - atomic exchange with release ordering
 * @v: pointer to atomic_t
 * @new: int value to assign
 *
 * Atomically updates @v to @new with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_xchg_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_xchg_release(atomic_t *v, int new)
{
#if defined(arch_atomic_xchg_release)
	return arch_atomic_xchg_release(v, new);
#elif defined(arch_atomic_xchg_relaxed)
	__atomic_release_fence();
	return arch_atomic_xchg_relaxed(v, new);
#elif defined(arch_atomic_xchg)
	return arch_atomic_xchg(v, new);
#else
	return raw_xchg_release(&v->counter, new);
#endif
}

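/*
 * Usage sketch (illustrative, not part of the generated header): the
 * _release and _acquire variants pair up across threads for ownership
 * hand-off. The flag protocol below is hypothetical.
 *
 *	producer:
 *		fill_buffer(buf);
 *		raw_atomic_xchg_release(&ready, 1);
 *
 *	consumer:
 *		if (raw_atomic_xchg_acquire(&ready, 0))
 *			read_buffer(buf);
 *
 * On the successful consumer path, the producer's buffer writes are
 * guaranteed to be visible.
 */
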
/**
 * raw_atomic_xchg_relaxed() - atomic exchange with relaxed ordering
 * @v: pointer to atomic_t
 * @new: int value to assign
 *
 * Atomically updates @v to @new with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_xchg_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_xchg_relaxed(atomic_t *v, int new)
{
#if defined(arch_atomic_xchg_relaxed)
	return arch_atomic_xchg_relaxed(v, new);
#elif defined(arch_atomic_xchg)
	return arch_atomic_xchg(v, new);
#else
	return raw_xchg_relaxed(&v->counter, new);
#endif
}

/**
 * raw_atomic_cmpxchg() - atomic compare and exchange with full ordering
 * @v: pointer to atomic_t
 * @old: int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_cmpxchg() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_cmpxchg(atomic_t *v, int old, int new)
{
#if defined(arch_atomic_cmpxchg)
	return arch_atomic_cmpxchg(v, old, new);
#elif defined(arch_atomic_cmpxchg_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_cmpxchg(&v->counter, old, new);
#endif
}

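/*
 * Usage sketch (illustrative, not part of the generated header): the
 * traditional cmpxchg() loop re-reads the value on every failure.
 * raw_atomic_try_cmpxchg() below expresses the same loop more compactly.
 * The helper name is hypothetical.
 *
 *	static void set_max(atomic_t *v, int i)
 *	{
 *		int old = raw_atomic_read(v);
 *		int cur;
 *
 *		while (old < i) {
 *			cur = raw_atomic_cmpxchg(v, old, i);
 *			if (cur == old)
 *				break;
 *			old = cur;
 *		}
 *	}
 */
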
/**
 * raw_atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
 * @v: pointer to atomic_t
 * @old: int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with acquire ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_cmpxchg_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
#if defined(arch_atomic_cmpxchg_acquire)
	return arch_atomic_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic_cmpxchg_relaxed)
	int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_cmpxchg)
	return arch_atomic_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_acquire(&v->counter, old, new);
#endif
}

/**
 * raw_atomic_cmpxchg_release() - atomic compare and exchange with release ordering
 * @v: pointer to atomic_t
 * @old: int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with release ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_cmpxchg_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
#if defined(arch_atomic_cmpxchg_release)
	return arch_atomic_cmpxchg_release(v, old, new);
#elif defined(arch_atomic_cmpxchg_relaxed)
	__atomic_release_fence();
	return arch_atomic_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic_cmpxchg)
	return arch_atomic_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_release(&v->counter, old, new);
#endif
}

/**
 * raw_atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
 * @v: pointer to atomic_t
 * @old: int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_cmpxchg_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
{
#if defined(arch_atomic_cmpxchg_relaxed)
	return arch_atomic_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic_cmpxchg)
	return arch_atomic_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_relaxed(&v->counter, old, new);
#endif
}

/**
 * raw_atomic_try_cmpxchg() - atomic compare and exchange with full ordering
 * @v: pointer to atomic_t
 * @old: pointer to int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with full ordering.
 * Otherwise, @v is not modified, @old is updated to the current value of @v,
 * and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
#if defined(arch_atomic_try_cmpxchg)
	return arch_atomic_try_cmpxchg(v, old, new);
#elif defined(arch_atomic_try_cmpxchg_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
#else
	int r, o = *old;
	r = raw_atomic_cmpxchg(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

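/*
 * Usage sketch (illustrative, not part of the generated header): because
 * @old is updated in place on failure, a try_cmpxchg() loop needs no
 * explicit re-read. This is the same set_max() as above, rewritten:
 *
 *	static void set_max(atomic_t *v, int i)
 *	{
 *		int old = raw_atomic_read(v);
 *
 *		do {
 *			if (old >= i)
 *				return;
 *		} while (!raw_atomic_try_cmpxchg(v, &old, i));
 *	}
 */
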
/**
 * raw_atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
 * @v: pointer to atomic_t
 * @old: pointer to int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with acquire ordering.
 * Otherwise, @v is not modified, @old is updated to the current value of @v,
 * and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
#if defined(arch_atomic_try_cmpxchg_acquire)
	return arch_atomic_try_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic_try_cmpxchg_relaxed)
	bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_try_cmpxchg)
	return arch_atomic_try_cmpxchg(v, old, new);
#else
	int r, o = *old;
	r = raw_atomic_cmpxchg_acquire(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
 * @v: pointer to atomic_t
 * @old: pointer to int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with release ordering.
 * Otherwise, @v is not modified, @old is updated to the current value of @v,
 * and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
#if defined(arch_atomic_try_cmpxchg_release)
	return arch_atomic_try_cmpxchg_release(v, old, new);
#elif defined(arch_atomic_try_cmpxchg_relaxed)
	__atomic_release_fence();
	return arch_atomic_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic_try_cmpxchg)
	return arch_atomic_try_cmpxchg(v, old, new);
#else
	int r, o = *old;
	r = raw_atomic_cmpxchg_release(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
 * @v: pointer to atomic_t
 * @old: pointer to int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
 * Otherwise, @v is not modified, @old is updated to the current value of @v,
 * and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
#if defined(arch_atomic_try_cmpxchg_relaxed)
	return arch_atomic_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic_try_cmpxchg)
	return arch_atomic_try_cmpxchg(v, old, new);
#else
	int r, o = *old;
	r = raw_atomic_cmpxchg_relaxed(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic_sub_and_test(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_and_test)
	return arch_atomic_sub_and_test(i, v);
#else
	return raw_atomic_sub_return(i, v) == 0;
#endif
}

/**
 * raw_atomic_dec_and_test() - atomic decrement and test if zero with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic_dec_and_test(atomic_t *v)
{
#if defined(arch_atomic_dec_and_test)
	return arch_atomic_dec_and_test(v);
#else
	return raw_atomic_dec_return(v) == 0;
#endif
}

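/*
 * Usage sketch (illustrative, not part of the generated header): the
 * classic reference-count put. Outside noinstr code, refcount_t is the
 * preferred API; this merely shows the shape of the operation. The struct
 * and helper names are hypothetical.
 *
 *	static void obj_put(struct obj *obj)
 *	{
 *		if (raw_atomic_dec_and_test(&obj->refs))
 *			obj_free(obj);
 *	}
 *
 * The full ordering ensures the last holder's accesses to the object
 * happen before the free.
 */
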
/**
 * raw_atomic_inc_and_test() - atomic increment and test if zero with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic_inc_and_test(atomic_t *v)
{
#if defined(arch_atomic_inc_and_test)
	return arch_atomic_inc_and_test(v);
#else
	return raw_atomic_inc_return(v) == 0;
#endif
}

/**
 * raw_atomic_add_negative() - atomic add and test if negative with full ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_negative() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic_add_negative(int i, atomic_t *v)
{
#if defined(arch_atomic_add_negative)
	return arch_atomic_add_negative(i, v);
#elif defined(arch_atomic_add_negative_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_add_negative_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_add_return(i, v) < 0;
#endif
}

/**
 * raw_atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_negative_acquire() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic_add_negative_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_add_negative_acquire)
	return arch_atomic_add_negative_acquire(i, v);
#elif defined(arch_atomic_add_negative_relaxed)
	bool ret = arch_atomic_add_negative_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_add_negative)
	return arch_atomic_add_negative(i, v);
#else
	return raw_atomic_add_return_acquire(i, v) < 0;
#endif
}

/**
 * raw_atomic_add_negative_release() - atomic add and test if negative with release ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_negative_release() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic_add_negative_release(int i, atomic_t *v)
{
#if defined(arch_atomic_add_negative_release)
	return arch_atomic_add_negative_release(i, v);
#elif defined(arch_atomic_add_negative_relaxed)
	__atomic_release_fence();
	return arch_atomic_add_negative_relaxed(i, v);
#elif defined(arch_atomic_add_negative)
	return arch_atomic_add_negative(i, v);
#else
	return raw_atomic_add_return_release(i, v) < 0;
#endif
}

/**
 * raw_atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_negative_relaxed() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic_add_negative_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_add_negative_relaxed)
	return arch_atomic_add_negative_relaxed(i, v);
#elif defined(arch_atomic_add_negative)
	return arch_atomic_add_negative(i, v);
#else
	return raw_atomic_add_return_relaxed(i, v) < 0;
#endif
}

/**
 * raw_atomic_fetch_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic_t
 * @a: int value to add
 * @u: int value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_unless() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
#if defined(arch_atomic_fetch_add_unless)
	return arch_atomic_fetch_add_unless(v, a, u);
#else
	int c = raw_atomic_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!raw_atomic_try_cmpxchg(v, &c, c + a));

	return c;
#endif
}

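/*
 * Usage sketch (illustrative, not part of the generated header): refuse
 * to take a reference once the counter has reached a saturation sentinel.
 * The struct and field names are hypothetical.
 *
 *	static bool obj_get_unless_saturated(struct obj *obj)
 *	{
 *		return raw_atomic_fetch_add_unless(&obj->refs, 1, INT_MAX) != INT_MAX;
 *	}
 */
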
/**
 * raw_atomic_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic_t
 * @a: int value to add
 * @u: int value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_add_unless() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic_add_unless(atomic_t *v, int a, int u)
{
#if defined(arch_atomic_add_unless)
	return arch_atomic_add_unless(v, a, u);
#else
	return raw_atomic_fetch_add_unless(v, a, u) != u;
#endif
}

/**
 * raw_atomic_inc_not_zero() - atomic increment unless zero with full ordering
 * @v: pointer to atomic_t
 *
 * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_inc_not_zero() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic_inc_not_zero(atomic_t *v)
{
#if defined(arch_atomic_inc_not_zero)
	return arch_atomic_inc_not_zero(v);
#else
	return raw_atomic_add_unless(v, 1, 0);
#endif
}

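/*
 * Usage sketch (illustrative, not part of the generated header): the
 * classic RCU lookup "tryget", which must not revive an object whose
 * refcount already hit zero. lookup() and the field names are
 * hypothetical.
 *
 *	rcu_read_lock();
 *	obj = lookup(key);
 *	if (obj && !raw_atomic_inc_not_zero(&obj->refs))
 *		obj = NULL;
 *	rcu_read_unlock();
 */
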
/**
 * raw_atomic_inc_unless_negative() - atomic increment unless negative with full ordering
 * @v: pointer to atomic_t
 *
 * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_inc_unless_negative() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic_inc_unless_negative(atomic_t *v)
{
#if defined(arch_atomic_inc_unless_negative)
	return arch_atomic_inc_unless_negative(v);
#else
	int c = raw_atomic_read(v);

	do {
		if (unlikely(c < 0))
			return false;
	} while (!raw_atomic_try_cmpxchg(v, &c, c + 1));

	return true;
#endif
}

/**
 * raw_atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
 * @v: pointer to atomic_t
 *
 * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_dec_unless_positive() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic_dec_unless_positive(atomic_t *v)
{
#if defined(arch_atomic_dec_unless_positive)
	return arch_atomic_dec_unless_positive(v);
#else
	int c = raw_atomic_read(v);

	do {
		if (unlikely(c > 0))
			return false;
	} while (!raw_atomic_try_cmpxchg(v, &c, c - 1));

	return true;
#endif
}

/**
 * raw_atomic_dec_if_positive() - atomic decrement if positive with full ordering
 * @v: pointer to atomic_t
 *
 * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_dec_if_positive() elsewhere.
 *
 * Return: (old value of @v) - 1, regardless of whether @v was updated.
 */
static __always_inline int
raw_atomic_dec_if_positive(atomic_t *v)
{
#if defined(arch_atomic_dec_if_positive)
	return arch_atomic_dec_if_positive(v);
#else
	int dec, c = raw_atomic_read(v);

	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!raw_atomic_try_cmpxchg(v, &c, dec));

	return dec;
#endif
}

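/*
 * Usage sketch (illustrative, not part of the generated header): a
 * semaphore-style "trydown" that consumes one unit only if one is
 * available; the negative return value doubles as the failure
 * indication. The helper name is hypothetical.
 *
 *	static bool consume_token(atomic_t *tokens)
 *	{
 *		return raw_atomic_dec_if_positive(tokens) >= 0;
 *	}
 */
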
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

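/*
 * CONFIG_GENERIC_ATOMIC64 is selected by architectures lacking native
 * 64-bit atomics; asm-generic/atomic64.h then declares the
 * arch_atomic64_*() operations, which the generic implementation
 * serializes in software through a small hashed set of spinlocks.
 */
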
/**
 * raw_atomic64_read() - atomic load with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically loads the value of @v with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_read() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline s64
raw_atomic64_read(const atomic64_t *v)
{
	return arch_atomic64_read(v);
}

2586 /**                                              2586 /**
2587  * raw_atomic64_read_acquire() - atomic load     2587  * raw_atomic64_read_acquire() - atomic load with acquire ordering
2588  * @v: pointer to atomic64_t                     2588  * @v: pointer to atomic64_t
2589  *                                               2589  *
2590  * Atomically loads the value of @v with acqu    2590  * Atomically loads the value of @v with acquire ordering.
2591  *                                               2591  *
2592  * Safe to use in noinstr code; prefer atomic    2592  * Safe to use in noinstr code; prefer atomic64_read_acquire() elsewhere.
2593  *                                               2593  *
2594  * Return: The value loaded from @v.             2594  * Return: The value loaded from @v.
2595  */                                              2595  */
2596 static __always_inline s64                       2596 static __always_inline s64
2597 raw_atomic64_read_acquire(const atomic64_t *v    2597 raw_atomic64_read_acquire(const atomic64_t *v)
2598 {                                                2598 {
2599 #if defined(arch_atomic64_read_acquire)          2599 #if defined(arch_atomic64_read_acquire)
2600         return arch_atomic64_read_acquire(v);    2600         return arch_atomic64_read_acquire(v);
2601 #else                                            2601 #else
2602         s64 ret;                                 2602         s64 ret;
2603                                                  2603 
2604         if (__native_word(atomic64_t)) {         2604         if (__native_word(atomic64_t)) {
2605                 ret = smp_load_acquire(&(v)->    2605                 ret = smp_load_acquire(&(v)->counter);
2606         } else {                                 2606         } else {
2607                 ret = raw_atomic64_read(v);      2607                 ret = raw_atomic64_read(v);
2608                 __atomic_acquire_fence();        2608                 __atomic_acquire_fence();
2609         }                                        2609         }
2610                                                  2610 
2611         return ret;                              2611         return ret;
2612 #endif                                           2612 #endif
2613 }                                                2613 }
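
/*
 * Illustrative sketch, not part of the generated header: an acquire
 * load pairs with a release store so that a reader observing the flag
 * also observes writes made before the flag was set. The names ready,
 * payload and consume() are hypothetical.
 *
 *      static atomic64_t ready;
 *      static s64 payload;
 *
 *      // writer
 *      payload = 42;
 *      raw_atomic64_set_release(&ready, 1);
 *
 *      // reader: if the load returns 1, payload is guaranteed to be 42
 *      if (raw_atomic64_read_acquire(&ready))
 *              consume(payload);
 */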

/**
 * raw_atomic64_set() - atomic set with relaxed ordering
 * @v: pointer to atomic64_t
 * @i: s64 value to assign
 *
 * Atomically sets @v to @i with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_set() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_set(atomic64_t *v, s64 i)
{
        arch_atomic64_set(v, i);
}

/**
 * raw_atomic64_set_release() - atomic set with release ordering
 * @v: pointer to atomic64_t
 * @i: s64 value to assign
 *
 * Atomically sets @v to @i with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_set_release() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_set_release(atomic64_t *v, s64 i)
{
#if defined(arch_atomic64_set_release)
        arch_atomic64_set_release(v, i);
#else
        if (__native_word(atomic64_t)) {
                smp_store_release(&(v)->counter, i);
        } else {
                __atomic_release_fence();
                raw_atomic64_set(v, i);
        }
#endif
}
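
/*
 * Note, not part of the generated header: the fence placement in the
 * fallbacks above is deliberately asymmetric. Release ordering puts
 * the fence *before* the plain store (earlier accesses may not move
 * past it), whereas acquire ordering puts the fence *after* the plain
 * load (later accesses may not move before it):
 *
 *      __atomic_release_fence();       // release: fence, then store
 *      raw_atomic64_set(v, i);
 *
 *      ret = raw_atomic64_read(v);     // acquire: load, then fence
 *      __atomic_acquire_fence();
 */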

/**
 * raw_atomic64_add() - atomic add with relaxed ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_add(s64 i, atomic64_t *v)
{
        arch_atomic64_add(i, v);
}

/**
 * raw_atomic64_add_return() - atomic add with full ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_add_return(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_return)
        return arch_atomic64_add_return(i, v);
#elif defined(arch_atomic64_add_return_relaxed)
        s64 ret;
        __atomic_pre_full_fence();
        ret = arch_atomic64_add_return_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
#else
#error "Unable to define raw_atomic64_add_return"
#endif
}
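
/*
 * Illustrative sketch, not part of the generated header: _return ops
 * hand back the updated value, which suits threshold checks. The
 * bytes_used counter and charge() helper are hypothetical.
 *
 *      static atomic64_t bytes_used;
 *
 *      static bool charge(s64 n, s64 limit)
 *      {
 *              if (raw_atomic64_add_return(n, &bytes_used) > limit) {
 *                      raw_atomic64_sub(n, &bytes_used);  // roll back
 *                      return false;
 *              }
 *              return true;
 *      }
 */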

/**
 * raw_atomic64_add_return_acquire() - atomic add with acquire ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_return_acquire)
        return arch_atomic64_add_return_acquire(i, v);
#elif defined(arch_atomic64_add_return_relaxed)
        s64 ret = arch_atomic64_add_return_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic64_add_return)
        return arch_atomic64_add_return(i, v);
#else
#error "Unable to define raw_atomic64_add_return_acquire"
#endif
}
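
/*
 * Note, not part of the generated header: the final #elif above falls
 * back to the fully ordered arch_atomic64_add_return(), which is
 * correct because full ordering is strictly stronger than acquire
 * ordering; the same "stronger is acceptable" fallback appears in
 * every _acquire, _release and _relaxed variant in this file.
 */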

/**
 * raw_atomic64_add_return_release() - atomic add with release ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_add_return_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_return_release)
        return arch_atomic64_add_return_release(i, v);
#elif defined(arch_atomic64_add_return_relaxed)
        __atomic_release_fence();
        return arch_atomic64_add_return_relaxed(i, v);
#elif defined(arch_atomic64_add_return)
        return arch_atomic64_add_return(i, v);
#else
#error "Unable to define raw_atomic64_add_return_release"
#endif
}
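
/*
 * Illustrative sketch, not part of the generated header: a release
 * update pairs with an acquire read on the other side. The obj fields
 * and names below are hypothetical.
 *
 *      // publisher: the write to obj->state happens-before the
 *      // counter update becomes visible ...
 *      obj->state = DONE;
 *      raw_atomic64_add_return_release(1, &obj->seq);
 *
 *      // ... so a reader that sees the new seq via an acquire load
 *      // is guaranteed to also see obj->state == DONE.
 */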

/**
 * raw_atomic64_add_return_relaxed() - atomic add with relaxed ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_return_relaxed)
        return arch_atomic64_add_return_relaxed(i, v);
#elif defined(arch_atomic64_add_return)
        return arch_atomic64_add_return(i, v);
#else
#error "Unable to define raw_atomic64_add_return_relaxed"
#endif
}

/**
 * raw_atomic64_fetch_add() - atomic add with full ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_add)
        return arch_atomic64_fetch_add(i, v);
#elif defined(arch_atomic64_fetch_add_relaxed)
        s64 ret;
        __atomic_pre_full_fence();
        ret = arch_atomic64_fetch_add_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
#else
#error "Unable to define raw_atomic64_fetch_add"
#endif
}
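
/*
 * Worked example, not part of the generated header: _fetch_ ops return
 * the original value, _return ops the updated one.
 *
 *      atomic64_t v = ATOMIC64_INIT(0);
 *      s64 old = raw_atomic64_fetch_add(5, &v);   // old == 0, v == 5
 *      s64 new = raw_atomic64_add_return(5, &v);  // new == 10, v == 10
 */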

/**
 * raw_atomic64_fetch_add_acquire() - atomic add with acquire ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_add_acquire)
        return arch_atomic64_fetch_add_acquire(i, v);
#elif defined(arch_atomic64_fetch_add_relaxed)
        s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic64_fetch_add)
        return arch_atomic64_fetch_add(i, v);
#else
#error "Unable to define raw_atomic64_fetch_add_acquire"
#endif
}

/**
 * raw_atomic64_fetch_add_release() - atomic add with release ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_add_release)
        return arch_atomic64_fetch_add_release(i, v);
#elif defined(arch_atomic64_fetch_add_relaxed)
        __atomic_release_fence();
        return arch_atomic64_fetch_add_relaxed(i, v);
#elif defined(arch_atomic64_fetch_add)
        return arch_atomic64_fetch_add(i, v);
#else
#error "Unable to define raw_atomic64_fetch_add_release"
#endif
}

/**
 * raw_atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_add_relaxed)
        return arch_atomic64_fetch_add_relaxed(i, v);
#elif defined(arch_atomic64_fetch_add)
        return arch_atomic64_fetch_add(i, v);
#else
#error "Unable to define raw_atomic64_fetch_add_relaxed"
#endif
}

/**
 * raw_atomic64_sub() - atomic subtract with relaxed ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_sub(s64 i, atomic64_t *v)
{
        arch_atomic64_sub(i, v);
}

/**
 * raw_atomic64_sub_return() - atomic subtract with full ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_sub_return(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_return)
        return arch_atomic64_sub_return(i, v);
#elif defined(arch_atomic64_sub_return_relaxed)
        s64 ret;
        __atomic_pre_full_fence();
        ret = arch_atomic64_sub_return_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
#else
#error "Unable to define raw_atomic64_sub_return"
#endif
}
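
/*
 * Illustrative sketch, not part of the generated header: a fully
 * ordered decrement whose updated value selects the "last user" path.
 * The obj/users/free_obj() names are hypothetical; real reference
 * counts should prefer refcount_t.
 *
 *      if (raw_atomic64_sub_return(1, &obj->users) == 0)
 *              free_obj(obj);
 */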

/**
 * raw_atomic64_sub_return_acquire() - atomic subtract with acquire ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_return_acquire)
        return arch_atomic64_sub_return_acquire(i, v);
#elif defined(arch_atomic64_sub_return_relaxed)
        s64 ret = arch_atomic64_sub_return_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic64_sub_return)
        return arch_atomic64_sub_return(i, v);
#else
#error "Unable to define raw_atomic64_sub_return_acquire"
#endif
}

/**
 * raw_atomic64_sub_return_release() - atomic subtract with release ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_sub_return_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_return_release)
        return arch_atomic64_sub_return_release(i, v);
#elif defined(arch_atomic64_sub_return_relaxed)
        __atomic_release_fence();
        return arch_atomic64_sub_return_relaxed(i, v);
#elif defined(arch_atomic64_sub_return)
        return arch_atomic64_sub_return(i, v);
#else
#error "Unable to define raw_atomic64_sub_return_release"
#endif
}

/**
 * raw_atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_return_relaxed)
        return arch_atomic64_sub_return_relaxed(i, v);
#elif defined(arch_atomic64_sub_return)
        return arch_atomic64_sub_return(i, v);
#else
#error "Unable to define raw_atomic64_sub_return_relaxed"
#endif
}

/**
 * raw_atomic64_fetch_sub() - atomic subtract with full ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_sub() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_sub)
        return arch_atomic64_fetch_sub(i, v);
#elif defined(arch_atomic64_fetch_sub_relaxed)
        s64 ret;
        __atomic_pre_full_fence();
        ret = arch_atomic64_fetch_sub_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
#else
#error "Unable to define raw_atomic64_fetch_sub"
#endif
}

/**
 * raw_atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_sub_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_sub_acquire)
        return arch_atomic64_fetch_sub_acquire(i, v);
#elif defined(arch_atomic64_fetch_sub_relaxed)
        s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic64_fetch_sub)
        return arch_atomic64_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic64_fetch_sub_acquire"
#endif
}

/**
 * raw_atomic64_fetch_sub_release() - atomic subtract with release ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_sub_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_sub_release)
        return arch_atomic64_fetch_sub_release(i, v);
#elif defined(arch_atomic64_fetch_sub_relaxed)
        __atomic_release_fence();
        return arch_atomic64_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic64_fetch_sub)
        return arch_atomic64_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic64_fetch_sub_release"
#endif
}

/**
 * raw_atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_sub_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_sub_relaxed)
        return arch_atomic64_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic64_fetch_sub)
        return arch_atomic64_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic64_fetch_sub_relaxed"
#endif
}

/**
 * raw_atomic64_inc() - atomic increment with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_inc(atomic64_t *v)
{
#if defined(arch_atomic64_inc)
        arch_atomic64_inc(v);
#else
        raw_atomic64_add(1, v);
#endif
}
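
/*
 * Note, not part of the generated header: when an architecture has no
 * dedicated inc/dec primitives, the inc/dec families here simply reuse
 * the add/sub fallbacks with a constant 1, with identical ordering
 * guarantees, e.g.:
 *
 *      raw_atomic64_inc(v);            // == raw_atomic64_add(1, v)
 *      raw_atomic64_inc_return(v);     // == raw_atomic64_add_return(1, v)
 */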

/**
 * raw_atomic64_inc_return() - atomic increment with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_inc_return(atomic64_t *v)
{
#if defined(arch_atomic64_inc_return)
        return arch_atomic64_inc_return(v);
#elif defined(arch_atomic64_inc_return_relaxed)
        s64 ret;
        __atomic_pre_full_fence();
        ret = arch_atomic64_inc_return_relaxed(v);
        __atomic_post_full_fence();
        return ret;
#else
        return raw_atomic64_add_return(1, v);
#endif
}

/**
 * raw_atomic64_inc_return_acquire() - atomic increment with acquire ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_inc_return_acquire(atomic64_t *v)
{
#if defined(arch_atomic64_inc_return_acquire)
        return arch_atomic64_inc_return_acquire(v);
#elif defined(arch_atomic64_inc_return_relaxed)
        s64 ret = arch_atomic64_inc_return_relaxed(v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic64_inc_return)
        return arch_atomic64_inc_return(v);
#else
        return raw_atomic64_add_return_acquire(1, v);
#endif
}

/**
 * raw_atomic64_inc_return_release() - atomic increment with release ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_inc_return_release(atomic64_t *v)
{
#if defined(arch_atomic64_inc_return_release)
        return arch_atomic64_inc_return_release(v);
#elif defined(arch_atomic64_inc_return_relaxed)
        __atomic_release_fence();
        return arch_atomic64_inc_return_relaxed(v);
#elif defined(arch_atomic64_inc_return)
        return arch_atomic64_inc_return(v);
#else
        return raw_atomic64_add_return_release(1, v);
#endif
}

/**
 * raw_atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_inc_return_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_inc_return_relaxed)
        return arch_atomic64_inc_return_relaxed(v);
#elif defined(arch_atomic64_inc_return)
        return arch_atomic64_inc_return(v);
#else
        return raw_atomic64_add_return_relaxed(1, v);
#endif
}

/**
 * raw_atomic64_fetch_inc() - atomic increment with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_inc() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_inc(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_inc)
        return arch_atomic64_fetch_inc(v);
#elif defined(arch_atomic64_fetch_inc_relaxed)
        s64 ret;
        __atomic_pre_full_fence();
        ret = arch_atomic64_fetch_inc_relaxed(v);
        __atomic_post_full_fence();
        return ret;
#else
        return raw_atomic64_fetch_add(1, v);
#endif
}
3248                                                  3248 
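/*
 * The middle branch above is the generic full-ordering recipe used all
 * through this file: bracket the arch's _relaxed op with the pre/post full
 * fences. A rough, self-contained C11 analogue of that composition (plain
 * <stdatomic.h>, not kernel API; for illustration only):
 *
 *	#include <stdatomic.h>
 *
 *	static long long fetch_inc_full(_Atomic long long *v)
 *	{
 *		long long ret;
 *		atomic_thread_fence(memory_order_seq_cst);	// ~ __atomic_pre_full_fence()
 *		ret = atomic_fetch_add_explicit(v, 1, memory_order_relaxed);
 *		atomic_thread_fence(memory_order_seq_cst);	// ~ __atomic_post_full_fence()
 *		return ret;
 *	}
 */
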
/**
 * raw_atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_inc_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_inc_acquire(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_inc_acquire)
        return arch_atomic64_fetch_inc_acquire(v);
#elif defined(arch_atomic64_fetch_inc_relaxed)
        s64 ret = arch_atomic64_fetch_inc_relaxed(v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic64_fetch_inc)
        return arch_atomic64_fetch_inc(v);
#else
        return raw_atomic64_fetch_add_acquire(1, v);
#endif
}

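/*
 * Note the acquire shape: the fence is placed *after* the relaxed access,
 * so later reads and writes cannot be reordered before the increment. A
 * self-contained C11 analogue (standard <stdatomic.h>, illustration only):
 *
 *	#include <stdatomic.h>
 *
 *	static long long fetch_inc_acquire(_Atomic long long *v)
 *	{
 *		long long ret = atomic_fetch_add_explicit(v, 1, memory_order_relaxed);
 *		atomic_thread_fence(memory_order_acquire);	// ~ __atomic_acquire_fence()
 *		return ret;
 *	}
 */
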
/**
 * raw_atomic64_fetch_inc_release() - atomic increment with release ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_inc_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_inc_release(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_inc_release)
        return arch_atomic64_fetch_inc_release(v);
#elif defined(arch_atomic64_fetch_inc_relaxed)
        __atomic_release_fence();
        return arch_atomic64_fetch_inc_relaxed(v);
#elif defined(arch_atomic64_fetch_inc)
        return arch_atomic64_fetch_inc(v);
#else
        return raw_atomic64_fetch_add_release(1, v);
#endif
}

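/*
 * Release is the mirror image: the fence goes *before* the relaxed access,
 * ordering all earlier reads and writes before the increment. C11 analogue
 * (illustration only):
 *
 *	#include <stdatomic.h>
 *
 *	static long long fetch_inc_release(_Atomic long long *v)
 *	{
 *		atomic_thread_fence(memory_order_release);	// ~ __atomic_release_fence()
 *		return atomic_fetch_add_explicit(v, 1, memory_order_relaxed);
 *	}
 */
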
/**
 * raw_atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_inc_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_inc_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_inc_relaxed)
        return arch_atomic64_fetch_inc_relaxed(v);
#elif defined(arch_atomic64_fetch_inc)
        return arch_atomic64_fetch_inc(v);
#else
        return raw_atomic64_fetch_add_relaxed(1, v);
#endif
}

/**
 * raw_atomic64_dec() - atomic decrement with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_dec(atomic64_t *v)
{
#if defined(arch_atomic64_dec)
        arch_atomic64_dec(v);
#else
        raw_atomic64_sub(1, v);
#endif
}

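/*
 * raw_atomic64_dec() returns nothing and implies no ordering, so it fits
 * pure bookkeeping. A hedged caller sketch (names are hypothetical):
 *
 *	static atomic64_t hyp_nr_active = ATOMIC64_INIT(0);
 *
 *	static void hyp_session_close(void)
 *	{
 *		raw_atomic64_dec(&hyp_nr_active);	// falls back to raw_atomic64_sub(1, v)
 *	}
 */
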
/**
 * raw_atomic64_dec_return() - atomic decrement with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_dec_return(atomic64_t *v)
{
#if defined(arch_atomic64_dec_return)
        return arch_atomic64_dec_return(v);
#elif defined(arch_atomic64_dec_return_relaxed)
        s64 ret;
        __atomic_pre_full_fence();
        ret = arch_atomic64_dec_return_relaxed(v);
        __atomic_post_full_fence();
        return ret;
#else
        return raw_atomic64_sub_return(1, v);
#endif
}

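/*
 * The fully ordered dec_return is the shape "last one out" checks want:
 * full ordering makes everything done before the drop visible to whichever
 * CPU sees the count hit zero. Hypothetical sketch only (real kernel code
 * should normally use refcount_t rather than open-coding this):
 *
 *	if (raw_atomic64_dec_return(&hyp_obj->users) == 0)
 *		hyp_obj_free(hyp_obj);
 */
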
/**
 * raw_atomic64_dec_return_acquire() - atomic decrement with acquire ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_dec_return_acquire(atomic64_t *v)
{
#if defined(arch_atomic64_dec_return_acquire)
        return arch_atomic64_dec_return_acquire(v);
#elif defined(arch_atomic64_dec_return_relaxed)
        s64 ret = arch_atomic64_dec_return_relaxed(v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic64_dec_return)
        return arch_atomic64_dec_return(v);
#else
        return raw_atomic64_sub_return_acquire(1, v);
#endif
}

/**
 * raw_atomic64_dec_return_release() - atomic decrement with release ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_dec_return_release(atomic64_t *v)
{
#if defined(arch_atomic64_dec_return_release)
        return arch_atomic64_dec_return_release(v);
#elif defined(arch_atomic64_dec_return_relaxed)
        __atomic_release_fence();
        return arch_atomic64_dec_return_relaxed(v);
#elif defined(arch_atomic64_dec_return)
        return arch_atomic64_dec_return(v);
#else
        return raw_atomic64_sub_return_release(1, v);
#endif
}

/**
 * raw_atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_dec_return_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_dec_return_relaxed)
        return arch_atomic64_dec_return_relaxed(v);
#elif defined(arch_atomic64_dec_return)
        return arch_atomic64_dec_return(v);
#else
        return raw_atomic64_sub_return_relaxed(1, v);
#endif
}

/**
 * raw_atomic64_fetch_dec() - atomic decrement with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_dec() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_dec(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_dec)
        return arch_atomic64_fetch_dec(v);
#elif defined(arch_atomic64_fetch_dec_relaxed)
        s64 ret;
        __atomic_pre_full_fence();
        ret = arch_atomic64_fetch_dec_relaxed(v);
        __atomic_post_full_fence();
        return ret;
#else
        return raw_atomic64_fetch_sub(1, v);
#endif
}

/**
 * raw_atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_dec_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_dec_acquire(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_dec_acquire)
        return arch_atomic64_fetch_dec_acquire(v);
#elif defined(arch_atomic64_fetch_dec_relaxed)
        s64 ret = arch_atomic64_fetch_dec_relaxed(v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic64_fetch_dec)
        return arch_atomic64_fetch_dec(v);
#else
        return raw_atomic64_fetch_sub_acquire(1, v);
#endif
}

/**
 * raw_atomic64_fetch_dec_release() - atomic decrement with release ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_dec_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_dec_release(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_dec_release)
        return arch_atomic64_fetch_dec_release(v);
#elif defined(arch_atomic64_fetch_dec_relaxed)
        __atomic_release_fence();
        return arch_atomic64_fetch_dec_relaxed(v);
#elif defined(arch_atomic64_fetch_dec)
        return arch_atomic64_fetch_dec(v);
#else
        return raw_atomic64_fetch_sub_release(1, v);
#endif
}

/**
 * raw_atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_dec_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_dec_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_dec_relaxed)
        return arch_atomic64_fetch_dec_relaxed(v);
#elif defined(arch_atomic64_fetch_dec)
        return arch_atomic64_fetch_dec(v);
#else
        return raw_atomic64_fetch_sub_relaxed(1, v);
#endif
}

/**
 * raw_atomic64_and() - atomic bitwise AND with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_and() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_and(s64 i, atomic64_t *v)
{
        arch_atomic64_and(i, v);
}

/**
 * raw_atomic64_fetch_and() - atomic bitwise AND with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_and() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_and(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and)
        return arch_atomic64_fetch_and(i, v);
#elif defined(arch_atomic64_fetch_and_relaxed)
        s64 ret;
        __atomic_pre_full_fence();
        ret = arch_atomic64_fetch_and_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
#else
#error "Unable to define raw_atomic64_fetch_and"
#endif
}

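/*
 * Unlike inc/dec above (which can always fall back to add/sub), fetch_and
 * is a leaf operation here: if an arch provides neither a full nor a
 * _relaxed form, the #else branch fails the build with #error rather than
 * guessing. A hypothetical flag-word caller of the full-ordered form:
 *
 *	#define HYP_FLAG_BUSY	(1LL << 0)
 *
 *	s64 old = raw_atomic64_fetch_and(~HYP_FLAG_BUSY, &hyp_flags);
 *	if (old & HYP_FLAG_BUSY)
 *		hyp_wake_waiters();	// we were the ones to clear it
 */
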
/**
 * raw_atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_and_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and_acquire)
        return arch_atomic64_fetch_and_acquire(i, v);
#elif defined(arch_atomic64_fetch_and_relaxed)
        s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic64_fetch_and)
        return arch_atomic64_fetch_and(i, v);
#else
#error "Unable to define raw_atomic64_fetch_and_acquire"
#endif
}

/**
 * raw_atomic64_fetch_and_release() - atomic bitwise AND with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_and_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and_release)
        return arch_atomic64_fetch_and_release(i, v);
#elif defined(arch_atomic64_fetch_and_relaxed)
        __atomic_release_fence();
        return arch_atomic64_fetch_and_relaxed(i, v);
#elif defined(arch_atomic64_fetch_and)
        return arch_atomic64_fetch_and(i, v);
#else
#error "Unable to define raw_atomic64_fetch_and_release"
#endif
}

/**
 * raw_atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_and_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and_relaxed)
        return arch_atomic64_fetch_and_relaxed(i, v);
#elif defined(arch_atomic64_fetch_and)
        return arch_atomic64_fetch_and(i, v);
#else
#error "Unable to define raw_atomic64_fetch_and_relaxed"
#endif
}

/**
 * raw_atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_andnot() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_andnot(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_andnot)
        arch_atomic64_andnot(i, v);
#else
        raw_atomic64_and(~i, v);
#endif
}

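/*
 * andnot earns a dedicated op because bit-clearing is common and some ISAs
 * have a single and-not instruction (e.g. ARM's bic); the #else branch
 * shows it is otherwise free via the identity (@v & ~@i). Hypothetical use:
 *
 *	raw_atomic64_andnot(HYP_FLAG_BUSY, &hyp_flags);
 *	// identical in effect to raw_atomic64_and(~HYP_FLAG_BUSY, &hyp_flags)
 */
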
/**
 * raw_atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot)
        return arch_atomic64_fetch_andnot(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
        s64 ret;
        __atomic_pre_full_fence();
        ret = arch_atomic64_fetch_andnot_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
#else
        return raw_atomic64_fetch_and(~i, v);
#endif
}

/**
 * raw_atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_acquire)
        return arch_atomic64_fetch_andnot_acquire(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
        s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic64_fetch_andnot)
        return arch_atomic64_fetch_andnot(i, v);
#else
        return raw_atomic64_fetch_and_acquire(~i, v);
#endif
}

/**
 * raw_atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_release)
        return arch_atomic64_fetch_andnot_release(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
        __atomic_release_fence();
        return arch_atomic64_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic64_fetch_andnot)
        return arch_atomic64_fetch_andnot(i, v);
#else
        return raw_atomic64_fetch_and_release(~i, v);
#endif
}

/**
 * raw_atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_relaxed)
        return arch_atomic64_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic64_fetch_andnot)
        return arch_atomic64_fetch_andnot(i, v);
#else
        return raw_atomic64_fetch_and_relaxed(~i, v);
#endif
}

/**
 * raw_atomic64_or() - atomic bitwise OR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_or() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_or(s64 i, atomic64_t *v)
{
        arch_atomic64_or(i, v);
}

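/*
 * Note raw_atomic64_or() calls arch_atomic64_or() unconditionally, just
 * like raw_atomic64_and() above: the plain (void) bitwise ops are always
 * provided by the arch, so the generator emits no #if ladder. Hypothetical
 * flag-setting counterpart to the andnot example:
 *
 *	#define HYP_FLAG_READY	(1LL << 1)
 *
 *	raw_atomic64_or(HYP_FLAG_READY, &hyp_flags);
 */
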
3801 /**                                              3801 /**
3802  * raw_atomic64_fetch_or() - atomic bitwise O    3802  * raw_atomic64_fetch_or() - atomic bitwise OR with full ordering
3803  * @i: s64 value                                 3803  * @i: s64 value
3804  * @v: pointer to atomic64_t                     3804  * @v: pointer to atomic64_t
3805  *                                               3805  *
3806  * Atomically updates @v to (@v | @i) with fu    3806  * Atomically updates @v to (@v | @i) with full ordering.
3807  *                                               3807  *
3808  * Safe to use in noinstr code; prefer atomic    3808  * Safe to use in noinstr code; prefer atomic64_fetch_or() elsewhere.
3809  *                                               3809  *
3810  * Return: The original value of @v.             3810  * Return: The original value of @v.
3811  */                                              3811  */
3812 static __always_inline s64                       3812 static __always_inline s64
3813 raw_atomic64_fetch_or(s64 i, atomic64_t *v)      3813 raw_atomic64_fetch_or(s64 i, atomic64_t *v)
3814 {                                                3814 {
3815 #if defined(arch_atomic64_fetch_or)              3815 #if defined(arch_atomic64_fetch_or)
3816         return arch_atomic64_fetch_or(i, v);     3816         return arch_atomic64_fetch_or(i, v);
3817 #elif defined(arch_atomic64_fetch_or_relaxed)    3817 #elif defined(arch_atomic64_fetch_or_relaxed)
3818         s64 ret;                                 3818         s64 ret;
3819         __atomic_pre_full_fence();               3819         __atomic_pre_full_fence();
3820         ret = arch_atomic64_fetch_or_relaxed(    3820         ret = arch_atomic64_fetch_or_relaxed(i, v);
3821         __atomic_post_full_fence();              3821         __atomic_post_full_fence();
3822         return ret;                              3822         return ret;
3823 #else                                            3823 #else
3824 #error "Unable to define raw_atomic64_fetch_o    3824 #error "Unable to define raw_atomic64_fetch_or"
3825 #endif                                           3825 #endif
3826 }                                                3826 }
3827                                                  3827 
3828 /**                                              3828 /**
3829  * raw_atomic64_fetch_or_acquire() - atomic b    3829  * raw_atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
3830  * @i: s64 value                                 3830  * @i: s64 value
3831  * @v: pointer to atomic64_t                     3831  * @v: pointer to atomic64_t
3832  *                                               3832  *
3833  * Atomically updates @v to (@v | @i) with ac    3833  * Atomically updates @v to (@v | @i) with acquire ordering.
3834  *                                               3834  *
3835  * Safe to use in noinstr code; prefer atomic    3835  * Safe to use in noinstr code; prefer atomic64_fetch_or_acquire() elsewhere.
3836  *                                               3836  *
3837  * Return: The original value of @v.             3837  * Return: The original value of @v.
3838  */                                              3838  */
3839 static __always_inline s64                       3839 static __always_inline s64
3840 raw_atomic64_fetch_or_acquire(s64 i, atomic64    3840 raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
3841 {                                                3841 {
3842 #if defined(arch_atomic64_fetch_or_acquire)      3842 #if defined(arch_atomic64_fetch_or_acquire)
3843         return arch_atomic64_fetch_or_acquire    3843         return arch_atomic64_fetch_or_acquire(i, v);
3844 #elif defined(arch_atomic64_fetch_or_relaxed)    3844 #elif defined(arch_atomic64_fetch_or_relaxed)
3845         s64 ret = arch_atomic64_fetch_or_rela    3845         s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
3846         __atomic_acquire_fence();                3846         __atomic_acquire_fence();
3847         return ret;                              3847         return ret;
3848 #elif defined(arch_atomic64_fetch_or)            3848 #elif defined(arch_atomic64_fetch_or)
3849         return arch_atomic64_fetch_or(i, v);     3849         return arch_atomic64_fetch_or(i, v);
3850 #else                                            3850 #else
3851 #error "Unable to define raw_atomic64_fetch_o    3851 #error "Unable to define raw_atomic64_fetch_or_acquire"
3852 #endif                                           3852 #endif
3853 }                                                3853 }
3854                                                  3854 
3855 /**                                              3855 /**
3856  * raw_atomic64_fetch_or_release() - atomic b    3856  * raw_atomic64_fetch_or_release() - atomic bitwise OR with release ordering
3857  * @i: s64 value                                 3857  * @i: s64 value
3858  * @v: pointer to atomic64_t                     3858  * @v: pointer to atomic64_t
3859  *                                               3859  *
3860  * Atomically updates @v to (@v | @i) with re    3860  * Atomically updates @v to (@v | @i) with release ordering.
3861  *                                               3861  *
3862  * Safe to use in noinstr code; prefer atomic    3862  * Safe to use in noinstr code; prefer atomic64_fetch_or_release() elsewhere.
3863  *                                               3863  *
3864  * Return: The original value of @v.             3864  * Return: The original value of @v.
3865  */                                              3865  */
3866 static __always_inline s64                       3866 static __always_inline s64
3867 raw_atomic64_fetch_or_release(s64 i, atomic64    3867 raw_atomic64_fetch_or_release(s64 i, atomic64_t *v)
3868 {                                                3868 {
3869 #if defined(arch_atomic64_fetch_or_release)      3869 #if defined(arch_atomic64_fetch_or_release)
3870         return arch_atomic64_fetch_or_release    3870         return arch_atomic64_fetch_or_release(i, v);
3871 #elif defined(arch_atomic64_fetch_or_relaxed)    3871 #elif defined(arch_atomic64_fetch_or_relaxed)
3872         __atomic_release_fence();                3872         __atomic_release_fence();
3873         return arch_atomic64_fetch_or_relaxed    3873         return arch_atomic64_fetch_or_relaxed(i, v);
3874 #elif defined(arch_atomic64_fetch_or)            3874 #elif defined(arch_atomic64_fetch_or)
3875         return arch_atomic64_fetch_or(i, v);     3875         return arch_atomic64_fetch_or(i, v);
3876 #else                                            3876 #else
3877 #error "Unable to define raw_atomic64_fetch_o    3877 #error "Unable to define raw_atomic64_fetch_or_release"
3878 #endif                                           3878 #endif
3879 }                                                3879 }

/**
 * raw_atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_relaxed)
        return arch_atomic64_fetch_or_relaxed(i, v);
#elif defined(arch_atomic64_fetch_or)
        return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_relaxed"
#endif
}
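
/*
 * Usage sketch (editorial; MY_FLAG_DIRTY and mark_dirty() are made-up
 * names): because the fetch_*() forms return the old value, a caller can
 * set a flag bit and learn whether it was already set in one atomic step:
 *
 *      #define MY_FLAG_DIRTY   BIT_ULL(0)      // assumed flag bit
 *
 *      static inline bool mark_dirty(atomic64_t *flags)
 *      {
 *              s64 old = raw_atomic64_fetch_or_relaxed(MY_FLAG_DIRTY, flags);
 *
 *              return old & MY_FLAG_DIRTY;     // true if it was already set
 *      }
 */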

/**
 * raw_atomic64_xor() - atomic bitwise XOR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xor() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_xor(s64 i, atomic64_t *v)
{
        arch_atomic64_xor(i, v);
}
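
/*
 * Editorial sketch: the void bitwise ops imply no ordering and return
 * nothing; they suit pure state updates, e.g. flipping a (hypothetical)
 * polarity bit in some atomic64_t state word:
 *
 *      raw_atomic64_xor(BIT_ULL(3), &state);   // toggle bit 3, relaxed
 */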

/**
 * raw_atomic64_fetch_xor() - atomic bitwise XOR with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor)
        return arch_atomic64_fetch_xor(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
        s64 ret;
        __atomic_pre_full_fence();
        ret = arch_atomic64_fetch_xor_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
#else
#error "Unable to define raw_atomic64_fetch_xor"
#endif
}
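
/*
 * Editorial note: the fully ordered fallback brackets the relaxed op with
 * __atomic_pre_full_fence()/__atomic_post_full_fence(), which the generic
 * headers typically map to smp_mb__before_atomic()/smp_mb__after_atomic().
 * Conceptually:
 *
 *      smp_mb__before_atomic();                // at least a release fence
 *      ret = arch_atomic64_fetch_xor_relaxed(i, v);
 *      smp_mb__after_atomic();                 // at least an acquire fence
 */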

/**
 * raw_atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_acquire)
        return arch_atomic64_fetch_xor_acquire(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
        s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic64_fetch_xor)
        return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_acquire"
#endif
}

/**
 * raw_atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_release)
        return arch_atomic64_fetch_xor_release(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
        __atomic_release_fence();
        return arch_atomic64_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic64_fetch_xor)
        return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_release"
#endif
}

/**
 * raw_atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_relaxed)
        return arch_atomic64_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic64_fetch_xor)
        return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_relaxed"
#endif
}

/**
 * raw_atomic64_xchg() - atomic exchange with full ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg)
        return arch_atomic64_xchg(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
        s64 ret;
        __atomic_pre_full_fence();
        ret = arch_atomic64_xchg_relaxed(v, new);
        __atomic_post_full_fence();
        return ret;
#else
        return raw_xchg(&v->counter, new);
#endif
}
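
/*
 * Usage sketch (editorial; drain_stat() is a made-up helper): xchg() is
 * the natural primitive for atomically draining an accumulator, e.g.
 * reading and zeroing a statistics counter in one fully ordered step:
 *
 *      static inline s64 drain_stat(atomic64_t *stat)
 *      {
 *              return raw_atomic64_xchg(stat, 0);      // old value, now 0
 *      }
 */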

/**
 * raw_atomic64_xchg_acquire() - atomic exchange with acquire ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg_acquire(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_acquire)
        return arch_atomic64_xchg_acquire(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
        s64 ret = arch_atomic64_xchg_relaxed(v, new);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic64_xchg)
        return arch_atomic64_xchg(v, new);
#else
        return raw_xchg_acquire(&v->counter, new);
#endif
}
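
/*
 * Editorial sketch (simplified; my_lock() is a made-up helper): acquire
 * ordering on the exchange is what keeps a test-and-set lock's critical
 * section from being speculated ahead of lock acquisition:
 *
 *      static inline void my_lock(atomic64_t *lock)
 *      {
 *              while (raw_atomic64_xchg_acquire(lock, 1))
 *                      cpu_relax();    // spin until we swap 0 -> 1
 *      }
 *
 * The matching unlock would be a release store of 0.
 */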

/**
 * raw_atomic64_xchg_release() - atomic exchange with release ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg_release(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_release)
        return arch_atomic64_xchg_release(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
        __atomic_release_fence();
        return arch_atomic64_xchg_relaxed(v, new);
#elif defined(arch_atomic64_xchg)
        return arch_atomic64_xchg(v, new);
#else
        return raw_xchg_release(&v->counter, new);
#endif
}

/**
 * raw_atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_relaxed)
        return arch_atomic64_xchg_relaxed(v, new);
#elif defined(arch_atomic64_xchg)
        return arch_atomic64_xchg(v, new);
#else
        return raw_xchg_relaxed(&v->counter, new);
#endif
}

/**
 * raw_atomic64_cmpxchg() - atomic compare and exchange with full ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg)
        return arch_atomic64_cmpxchg(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
        s64 ret;
        __atomic_pre_full_fence();
        ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
        __atomic_post_full_fence();
        return ret;
#else
        return raw_cmpxchg(&v->counter, old, new);
#endif
}
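
/*
 * Usage sketch (editorial; inc_below() is a made-up helper): the classic
 * cmpxchg() loop must feed the returned value back in by hand on failure:
 *
 *      static inline bool inc_below(atomic64_t *v, s64 limit)
 *      {
 *              s64 seen, old = raw_atomic64_read(v);
 *
 *              while (old < limit) {
 *                      seen = raw_atomic64_cmpxchg(v, old, old + 1);
 *                      if (seen == old)
 *                              return true;    // exchange happened
 *                      old = seen;             // lost a race; retry
 *              }
 *              return false;
 *      }
 */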

/**
 * raw_atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with acquire ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_acquire)
        return arch_atomic64_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
        s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic64_cmpxchg)
        return arch_atomic64_cmpxchg(v, old, new);
#else
        return raw_cmpxchg_acquire(&v->counter, old, new);
#endif
}

/**
 * raw_atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with release ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_release)
        return arch_atomic64_cmpxchg_release(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
        __atomic_release_fence();
        return arch_atomic64_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_cmpxchg)
        return arch_atomic64_cmpxchg(v, old, new);
#else
        return raw_cmpxchg_release(&v->counter, old, new);
#endif
}

/**
 * raw_atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_relaxed)
        return arch_atomic64_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_cmpxchg)
        return arch_atomic64_cmpxchg(v, old, new);
#else
        return raw_cmpxchg_relaxed(&v->counter, old, new);
#endif
}

/**
 * raw_atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with full ordering.
 * Otherwise, @v is not modified, @old is updated to the current value of @v,
 * and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg)
        return arch_atomic64_try_cmpxchg(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
        bool ret;
        __atomic_pre_full_fence();
        ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
        __atomic_post_full_fence();
        return ret;
#else
        s64 r, o = *old;
        r = raw_atomic64_cmpxchg(v, o, new);
        if (unlikely(r != o))
                *old = r;
        return likely(r == o);
#endif
}
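
/*
 * Usage sketch (editorial): try_cmpxchg() updates *@old on failure, so a
 * retry loop needs no explicit re-read and no return-value comparison.
 * The bounded increment from the cmpxchg() sketch above becomes:
 *
 *      static inline bool inc_below(atomic64_t *v, s64 limit)
 *      {
 *              s64 old = raw_atomic64_read(v);
 *
 *              do {
 *                      if (old >= limit)
 *                              return false;
 *              } while (!raw_atomic64_try_cmpxchg(v, &old, old + 1));
 *
 *              return true;
 *      }
 */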

/**
 * raw_atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with acquire ordering.
 * Otherwise, @v is not modified, @old is updated to the current value of @v,
 * and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_acquire)
        return arch_atomic64_try_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
        bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic64_try_cmpxchg)
        return arch_atomic64_try_cmpxchg(v, old, new);
#else
        s64 r, o = *old;
        r = raw_atomic64_cmpxchg_acquire(v, o, new);
        if (unlikely(r != o))
                *old = r;
        return likely(r == o);
#endif
}

/**
 * raw_atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with release ordering.
 * Otherwise, @v is not modified, @old is updated to the current value of @v,
 * and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_release)
        return arch_atomic64_try_cmpxchg_release(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
        __atomic_release_fence();
        return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg)
        return arch_atomic64_try_cmpxchg(v, old, new);
#else
        s64 r, o = *old;
        r = raw_atomic64_cmpxchg_release(v, o, new);
        if (unlikely(r != o))
                *old = r;
        return likely(r == o);
#endif
}

/**
 * raw_atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
 * Otherwise, @v is not modified, @old is updated to the current value of @v,
 * and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_relaxed)
        return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg)
        return arch_atomic64_try_cmpxchg(v, old, new);
#else
        s64 r, o = *old;
        r = raw_atomic64_cmpxchg_relaxed(v, o, new);
        if (unlikely(r != o))
                *old = r;
        return likely(r == o);
#endif
}

/**
 * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_and_test)
        return arch_atomic64_sub_and_test(i, v);
#else
        return raw_atomic64_sub_return(i, v) == 0;
#endif
}
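
/*
 * Usage sketch (editorial; struct obj and free_obj() are made up): the
 * *_and_test() ops fuse the update and the zero test into one fully
 * ordered step - the shape of a reference-count release:
 *
 *      static inline void obj_put_many(struct obj *o, s64 n)
 *      {
 *              if (raw_atomic64_sub_and_test(n, &o->refs))
 *                      free_obj(o);    // dropped the last reference
 *      }
 */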

/**
 * raw_atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_dec_and_test(atomic64_t *v)
{
#if defined(arch_atomic64_dec_and_test)
        return arch_atomic64_dec_and_test(v);
#else
        return raw_atomic64_dec_return(v) == 0;
#endif
}

/**
 * raw_atomic64_inc_and_test() - atomic increment and test if zero with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_and_test(atomic64_t *v)
{
#if defined(arch_atomic64_inc_and_test)
        return arch_atomic64_inc_and_test(v);
#else
        return raw_atomic64_inc_return(v) == 0;
#endif
}

/**
 * raw_atomic64_add_negative() - atomic add and test if negative with full ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative)
        return arch_atomic64_add_negative(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
        bool ret;
        __atomic_pre_full_fence();
        ret = arch_atomic64_add_negative_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
#else
        return raw_atomic64_add_return(i, v) < 0;
#endif
}
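
/*
 * Editorial sketch (budget and handle_overdraft() are made-up names):
 * add_negative() reports the sign of the *resulting* value, as the
 * generic fallback's add_return() < 0 test makes explicit:
 *
 *      if (raw_atomic64_add_negative(-cost, &budget))
 *              handle_overdraft();     // balance went below zero
 */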

/**
 * raw_atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_acquire() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_acquire)
        return arch_atomic64_add_negative_acquire(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
        bool ret = arch_atomic64_add_negative_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
#elif defined(arch_atomic64_add_negative)
        return arch_atomic64_add_negative(i, v);
#else
        return raw_atomic64_add_return_acquire(i, v) < 0;
#endif
}

/**
 * raw_atomic64_add_negative_release() - atomic add and test if negative with release ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_release() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_release)
        return arch_atomic64_add_negative_release(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
        __atomic_release_fence();
        return arch_atomic64_add_negative_relaxed(i, v);
#elif defined(arch_atomic64_add_negative)
        return arch_atomic64_add_negative(i, v);
#else
        return raw_atomic64_add_return_release(i, v) < 0;
#endif
}

/**
 * raw_atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_relaxed() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_relaxed)
        return arch_atomic64_add_negative_relaxed(i, v);
#elif defined(arch_atomic64_add_negative)
        return arch_atomic64_add_negative(i, v);
#else
        return raw_atomic64_add_return_relaxed(i, v) < 0;
#endif
}
4532                                                  4532 
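/*
 * Editor's usage sketch, not part of the generated header: a signed
 * "credits" pool (the 'pool' structure and field are hypothetical).
 * Release ordering makes the stores issued before the charge visible
 * to any CPU that observes the counter going negative.
 *
 *	static bool charge_credits(struct pool *pool, s64 n)
 *	{
 *		// true here means the pool is now overcommitted
 *		return raw_atomic64_add_negative_release(-n, &pool->credits);
 *	}
 */
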
/**
 * raw_atomic64_fetch_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic64_t
 * @a: s64 value to add
 * @u: s64 value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add_unless() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
#if defined(arch_atomic64_fetch_add_unless)
	return arch_atomic64_fetch_add_unless(v, a, u);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c + a));

	return c;
#endif
}

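/*
 * Editor's usage sketch, not part of the generated header: the
 * returned old value tells the caller whether the add was applied.
 * Here a stats counter is frozen by parking it at a sentinel (all
 * names are hypothetical):
 *
 *	#define EVENTS_FROZEN	(-1LL)
 *
 *	s64 old = raw_atomic64_fetch_add_unless(&st->events, 1, EVENTS_FROZEN);
 *	if (old == EVENTS_FROZEN)
 *		;	// frozen: the increment was not applied
 */
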
/**
 * raw_atomic64_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic64_t
 * @a: s64 value to add
 * @u: s64 value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_add_unless() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
#if defined(arch_atomic64_add_unless)
	return arch_atomic64_add_unless(v, a, u);
#else
	return raw_atomic64_fetch_add_unless(v, a, u) != u;
#endif
}

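/*
 * Editor's sketch, not part of the generated header: the boolean form
 * suits callers that only care whether the update happened, e.g. a
 * hypothetical activity count parked at -1 while the device is
 * suspended:
 *
 *	if (!raw_atomic64_add_unless(&dev->active, 1, -1))
 *		return -EAGAIN;	// suspended; no activity recorded
 */
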
/**
 * raw_atomic64_inc_not_zero() - atomic increment unless zero with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_not_zero() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_not_zero(atomic64_t *v)
{
#if defined(arch_atomic64_inc_not_zero)
	return arch_atomic64_inc_not_zero(v);
#else
	return raw_atomic64_add_unless(v, 1, 0);
#endif
}

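/*
 * Editor's usage sketch, not part of the generated header: the
 * classic RCU lookup pattern, where a reference count of zero means
 * the object is already being freed and must not be resurrected
 * ('lookup', 'obj' and 'refs' are hypothetical):
 *
 *	rcu_read_lock();
 *	obj = lookup(key);
 *	if (obj && !raw_atomic64_inc_not_zero(&obj->refs))
 *		obj = NULL;	// dying object; treat as not found
 *	rcu_read_unlock();
 */
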
/**
 * raw_atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_unless_negative() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_unless_negative(atomic64_t *v)
{
#if defined(arch_atomic64_inc_unless_negative)
	return arch_atomic64_inc_unless_negative(v);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c < 0))
			return false;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c + 1));

	return true;
#endif
}

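/*
 * Editor's sketch, not part of the generated header: a simplified
 * reader/writer count (hypothetical 'rw->count') where a non-negative
 * value counts readers and a negative value marks a writer, so a
 * reader may only enter while the count has not gone negative:
 *
 *	if (!raw_atomic64_inc_unless_negative(&rw->count))
 *		return false;	// writer active; reader backs off
 */
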
/**
 * raw_atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_unless_positive() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_dec_unless_positive(atomic64_t *v)
{
#if defined(arch_atomic64_dec_unless_positive)
	return arch_atomic64_dec_unless_positive(v);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c > 0))
			return false;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c - 1));

	return true;
#endif
}

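/*
 * Editor's sketch, mirroring the reader side above (same hypothetical
 * 'rw->count'): the writer may push the count negative only while no
 * reader holds it, i.e. while the count is not positive:
 *
 *	if (!raw_atomic64_dec_unless_positive(&rw->count))
 *		return false;	// readers active; writer backs off
 */
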
/**
 * raw_atomic64_dec_if_positive() - atomic decrement if positive with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_if_positive() elsewhere.
 *
 * Return: The old value of (@v - 1), regardless of whether @v was updated.
 */
static __always_inline s64
raw_atomic64_dec_if_positive(atomic64_t *v)
{
#if defined(arch_atomic64_dec_if_positive)
	return arch_atomic64_dec_if_positive(v);
#else
	s64 dec, c = raw_atomic64_read(v);

	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!raw_atomic64_try_cmpxchg(v, &c, dec));

	return dec;
#endif
}

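/*
 * Editor's usage sketch, not part of the generated header: a
 * semaphore-style trylock over a hypothetical 'sem->count'. Note the
 * return value is the decremented value even when nothing was taken,
 * so success is a non-negative result:
 *
 *	if (raw_atomic64_dec_if_positive(&sem->count) >= 0)
 *		return true;	// slot acquired
 *	return false;		// count was already <= 0; unchanged
 */
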
#endif /* _LINUX_ATOMIC_FALLBACK_H */
// b565db590afeeff0d7c9485ccbca5bb6e155749f