TOMOYO Linux Cross Reference
Linux/include/linux/atomic/atomic-arch-fallback.h

Diff markup

Differences between /include/linux/atomic/atomic-arch-fallback.h (Version linux-6.12-rc7) and /include/linux/atomic/atomic-arch-fallback.h (Version linux-6.2.16)
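For orientation before the listing: on the newer (linux-6.12-rc7) side the generated header builds raw_* wrappers by selecting the strongest arch_* primitive the architecture actually provides, while the older (linux-6.2.16) side instead fills in missing arch_* ordering variants with #ifndef ladders. Below is a minimal sketch of the newer selection pattern for one operation, reassembled from the raw_xchg_acquire block visible in the left column; the *_not_implemented stub spelling follows the repeating pattern in the diff and is illustrative rather than a verbatim copy of the generated file.

    /*
     * Sketch of the 6.12-rc7 selection pattern (same shape as the
     * generated header, not a copy): prefer the arch-provided acquire
     * op; else build it from the relaxed op plus an acquire fence;
     * else reuse the fully ordered op; else make any use fail at link
     * time via an undefined extern function.
     */
    #if defined(arch_xchg_acquire)
    #define raw_xchg_acquire arch_xchg_acquire
    #elif defined(arch_xchg_relaxed)
    #define raw_xchg_acquire(...) \
            __atomic_op_acquire(arch_xchg, __VA_ARGS__)
    #elif defined(arch_xchg)
    #define raw_xchg_acquire arch_xchg
    #else
    extern void raw_xchg_acquire_not_implemented(void);
    #define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
    #endif

The same four-way choice (exact variant, relaxed variant plus fence, fully ordered variant, link-time error) repeats in the left column for xchg, cmpxchg, cmpxchg64, cmpxchg128 and their try_ forms, and the equivalent logic for the atomic_t operations moves into raw_atomic_*() inline wrappers further down.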


  1 // SPDX-License-Identifier: GPL-2.0                 1 // SPDX-License-Identifier: GPL-2.0
  2                                                     2 
  3 // Generated by scripts/atomic/gen-atomic-fall      3 // Generated by scripts/atomic/gen-atomic-fallback.sh
  4 // DO NOT MODIFY THIS FILE DIRECTLY                 4 // DO NOT MODIFY THIS FILE DIRECTLY
  5                                                     5 
  6 #ifndef _LINUX_ATOMIC_FALLBACK_H                    6 #ifndef _LINUX_ATOMIC_FALLBACK_H
  7 #define _LINUX_ATOMIC_FALLBACK_H                    7 #define _LINUX_ATOMIC_FALLBACK_H
  8                                                     8 
  9 #include <linux/compiler.h>                         9 #include <linux/compiler.h>
 10                                                    10 
 11 #if defined(arch_xchg)                         !!  11 #ifndef arch_xchg_relaxed
 12 #define raw_xchg arch_xchg                     !!  12 #define arch_xchg_acquire arch_xchg
 13 #elif defined(arch_xchg_relaxed)               !!  13 #define arch_xchg_release arch_xchg
 14 #define raw_xchg(...) \                        !!  14 #define arch_xchg_relaxed arch_xchg
 15         __atomic_op_fence(arch_xchg, __VA_ARGS !!  15 #else /* arch_xchg_relaxed */
 16 #else                                          << 
 17 extern void raw_xchg_not_implemented(void);    << 
 18 #define raw_xchg(...) raw_xchg_not_implemented << 
 19 #endif                                         << 
 20                                                    16 
 21 #if defined(arch_xchg_acquire)                 !!  17 #ifndef arch_xchg_acquire
 22 #define raw_xchg_acquire arch_xchg_acquire     !!  18 #define arch_xchg_acquire(...) \
 23 #elif defined(arch_xchg_relaxed)               << 
 24 #define raw_xchg_acquire(...) \                << 
 25         __atomic_op_acquire(arch_xchg, __VA_AR     19         __atomic_op_acquire(arch_xchg, __VA_ARGS__)
 26 #elif defined(arch_xchg)                       << 
 27 #define raw_xchg_acquire arch_xchg             << 
 28 #else                                          << 
 29 extern void raw_xchg_acquire_not_implemented(v << 
 30 #define raw_xchg_acquire(...) raw_xchg_acquire << 
 31 #endif                                             20 #endif
 32                                                    21 
 33 #if defined(arch_xchg_release)                 !!  22 #ifndef arch_xchg_release
 34 #define raw_xchg_release arch_xchg_release     !!  23 #define arch_xchg_release(...) \
 35 #elif defined(arch_xchg_relaxed)               << 
 36 #define raw_xchg_release(...) \                << 
 37         __atomic_op_release(arch_xchg, __VA_AR     24         __atomic_op_release(arch_xchg, __VA_ARGS__)
 38 #elif defined(arch_xchg)                       << 
 39 #define raw_xchg_release arch_xchg             << 
 40 #else                                          << 
 41 extern void raw_xchg_release_not_implemented(v << 
 42 #define raw_xchg_release(...) raw_xchg_release << 
 43 #endif                                         << 
 44                                                << 
 45 #if defined(arch_xchg_relaxed)                 << 
 46 #define raw_xchg_relaxed arch_xchg_relaxed     << 
 47 #elif defined(arch_xchg)                       << 
 48 #define raw_xchg_relaxed arch_xchg             << 
 49 #else                                          << 
 50 extern void raw_xchg_relaxed_not_implemented(v << 
 51 #define raw_xchg_relaxed(...) raw_xchg_relaxed << 
 52 #endif                                         << 
 53                                                << 
 54 #if defined(arch_cmpxchg)                      << 
 55 #define raw_cmpxchg arch_cmpxchg               << 
 56 #elif defined(arch_cmpxchg_relaxed)            << 
 57 #define raw_cmpxchg(...) \                     << 
 58         __atomic_op_fence(arch_cmpxchg, __VA_A << 
 59 #else                                          << 
 60 extern void raw_cmpxchg_not_implemented(void); << 
 61 #define raw_cmpxchg(...) raw_cmpxchg_not_imple << 
 62 #endif                                             25 #endif
 63                                                    26 
 64 #if defined(arch_cmpxchg_acquire)              !!  27 #ifndef arch_xchg
 65 #define raw_cmpxchg_acquire arch_cmpxchg_acqui !!  28 #define arch_xchg(...) \
 66 #elif defined(arch_cmpxchg_relaxed)            !!  29         __atomic_op_fence(arch_xchg, __VA_ARGS__)
 67 #define raw_cmpxchg_acquire(...) \             !!  30 #endif
                                                   >>  31 
                                                   >>  32 #endif /* arch_xchg_relaxed */
                                                   >>  33 
                                                   >>  34 #ifndef arch_cmpxchg_relaxed
                                                   >>  35 #define arch_cmpxchg_acquire arch_cmpxchg
                                                   >>  36 #define arch_cmpxchg_release arch_cmpxchg
                                                   >>  37 #define arch_cmpxchg_relaxed arch_cmpxchg
                                                   >>  38 #else /* arch_cmpxchg_relaxed */
                                                   >>  39 
                                                   >>  40 #ifndef arch_cmpxchg_acquire
                                                   >>  41 #define arch_cmpxchg_acquire(...) \
 68         __atomic_op_acquire(arch_cmpxchg, __VA     42         __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
 69 #elif defined(arch_cmpxchg)                    << 
 70 #define raw_cmpxchg_acquire arch_cmpxchg       << 
 71 #else                                          << 
 72 extern void raw_cmpxchg_acquire_not_implemente << 
 73 #define raw_cmpxchg_acquire(...) raw_cmpxchg_a << 
 74 #endif                                             43 #endif
 75                                                    44 
 76 #if defined(arch_cmpxchg_release)              !!  45 #ifndef arch_cmpxchg_release
 77 #define raw_cmpxchg_release arch_cmpxchg_relea !!  46 #define arch_cmpxchg_release(...) \
 78 #elif defined(arch_cmpxchg_relaxed)            << 
 79 #define raw_cmpxchg_release(...) \             << 
 80         __atomic_op_release(arch_cmpxchg, __VA     47         __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
 81 #elif defined(arch_cmpxchg)                    << 
 82 #define raw_cmpxchg_release arch_cmpxchg       << 
 83 #else                                          << 
 84 extern void raw_cmpxchg_release_not_implemente << 
 85 #define raw_cmpxchg_release(...) raw_cmpxchg_r << 
 86 #endif                                         << 
 87                                                << 
 88 #if defined(arch_cmpxchg_relaxed)              << 
 89 #define raw_cmpxchg_relaxed arch_cmpxchg_relax << 
 90 #elif defined(arch_cmpxchg)                    << 
 91 #define raw_cmpxchg_relaxed arch_cmpxchg       << 
 92 #else                                          << 
 93 extern void raw_cmpxchg_relaxed_not_implemente << 
 94 #define raw_cmpxchg_relaxed(...) raw_cmpxchg_r << 
 95 #endif                                         << 
 96                                                << 
 97 #if defined(arch_cmpxchg64)                    << 
 98 #define raw_cmpxchg64 arch_cmpxchg64           << 
 99 #elif defined(arch_cmpxchg64_relaxed)          << 
100 #define raw_cmpxchg64(...) \                   << 
101         __atomic_op_fence(arch_cmpxchg64, __VA << 
102 #else                                          << 
103 extern void raw_cmpxchg64_not_implemented(void << 
104 #define raw_cmpxchg64(...) raw_cmpxchg64_not_i << 
105 #endif                                             48 #endif
106                                                    49 
107 #if defined(arch_cmpxchg64_acquire)            !!  50 #ifndef arch_cmpxchg
108 #define raw_cmpxchg64_acquire arch_cmpxchg64_a !!  51 #define arch_cmpxchg(...) \
109 #elif defined(arch_cmpxchg64_relaxed)          !!  52         __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
110 #define raw_cmpxchg64_acquire(...) \           !!  53 #endif
                                                   >>  54 
                                                   >>  55 #endif /* arch_cmpxchg_relaxed */
                                                   >>  56 
                                                   >>  57 #ifndef arch_cmpxchg64_relaxed
                                                   >>  58 #define arch_cmpxchg64_acquire arch_cmpxchg64
                                                   >>  59 #define arch_cmpxchg64_release arch_cmpxchg64
                                                   >>  60 #define arch_cmpxchg64_relaxed arch_cmpxchg64
                                                   >>  61 #else /* arch_cmpxchg64_relaxed */
                                                   >>  62 
                                                   >>  63 #ifndef arch_cmpxchg64_acquire
                                                   >>  64 #define arch_cmpxchg64_acquire(...) \
111         __atomic_op_acquire(arch_cmpxchg64, __     65         __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
112 #elif defined(arch_cmpxchg64)                  << 
113 #define raw_cmpxchg64_acquire arch_cmpxchg64   << 
114 #else                                          << 
115 extern void raw_cmpxchg64_acquire_not_implemen << 
116 #define raw_cmpxchg64_acquire(...) raw_cmpxchg << 
117 #endif                                             66 #endif
118                                                    67 
119 #if defined(arch_cmpxchg64_release)            !!  68 #ifndef arch_cmpxchg64_release
120 #define raw_cmpxchg64_release arch_cmpxchg64_r !!  69 #define arch_cmpxchg64_release(...) \
121 #elif defined(arch_cmpxchg64_relaxed)          << 
122 #define raw_cmpxchg64_release(...) \           << 
123         __atomic_op_release(arch_cmpxchg64, __     70         __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
124 #elif defined(arch_cmpxchg64)                  << 
125 #define raw_cmpxchg64_release arch_cmpxchg64   << 
126 #else                                          << 
127 extern void raw_cmpxchg64_release_not_implemen << 
128 #define raw_cmpxchg64_release(...) raw_cmpxchg << 
129 #endif                                         << 
130                                                << 
131 #if defined(arch_cmpxchg64_relaxed)            << 
132 #define raw_cmpxchg64_relaxed arch_cmpxchg64_r << 
133 #elif defined(arch_cmpxchg64)                  << 
134 #define raw_cmpxchg64_relaxed arch_cmpxchg64   << 
135 #else                                          << 
136 extern void raw_cmpxchg64_relaxed_not_implemen << 
137 #define raw_cmpxchg64_relaxed(...) raw_cmpxchg << 
138 #endif                                         << 
139                                                << 
140 #if defined(arch_cmpxchg128)                   << 
141 #define raw_cmpxchg128 arch_cmpxchg128         << 
142 #elif defined(arch_cmpxchg128_relaxed)         << 
143 #define raw_cmpxchg128(...) \                  << 
144         __atomic_op_fence(arch_cmpxchg128, __V << 
145 #else                                          << 
146 extern void raw_cmpxchg128_not_implemented(voi << 
147 #define raw_cmpxchg128(...) raw_cmpxchg128_not << 
148 #endif                                         << 
149                                                << 
150 #if defined(arch_cmpxchg128_acquire)           << 
151 #define raw_cmpxchg128_acquire arch_cmpxchg128 << 
152 #elif defined(arch_cmpxchg128_relaxed)         << 
153 #define raw_cmpxchg128_acquire(...) \          << 
154         __atomic_op_acquire(arch_cmpxchg128, _ << 
155 #elif defined(arch_cmpxchg128)                 << 
156 #define raw_cmpxchg128_acquire arch_cmpxchg128 << 
157 #else                                          << 
158 extern void raw_cmpxchg128_acquire_not_impleme << 
159 #define raw_cmpxchg128_acquire(...) raw_cmpxch << 
160 #endif                                         << 
161                                                << 
162 #if defined(arch_cmpxchg128_release)           << 
163 #define raw_cmpxchg128_release arch_cmpxchg128 << 
164 #elif defined(arch_cmpxchg128_relaxed)         << 
165 #define raw_cmpxchg128_release(...) \          << 
166         __atomic_op_release(arch_cmpxchg128, _ << 
167 #elif defined(arch_cmpxchg128)                 << 
168 #define raw_cmpxchg128_release arch_cmpxchg128 << 
169 #else                                          << 
170 extern void raw_cmpxchg128_release_not_impleme << 
171 #define raw_cmpxchg128_release(...) raw_cmpxch << 
172 #endif                                         << 
173                                                << 
174 #if defined(arch_cmpxchg128_relaxed)           << 
175 #define raw_cmpxchg128_relaxed arch_cmpxchg128 << 
176 #elif defined(arch_cmpxchg128)                 << 
177 #define raw_cmpxchg128_relaxed arch_cmpxchg128 << 
178 #else                                          << 
179 extern void raw_cmpxchg128_relaxed_not_impleme << 
180 #define raw_cmpxchg128_relaxed(...) raw_cmpxch << 
181 #endif                                         << 
182                                                << 
183 #if defined(arch_try_cmpxchg)                  << 
184 #define raw_try_cmpxchg arch_try_cmpxchg       << 
185 #elif defined(arch_try_cmpxchg_relaxed)        << 
186 #define raw_try_cmpxchg(...) \                 << 
187         __atomic_op_fence(arch_try_cmpxchg, __ << 
188 #else                                          << 
189 #define raw_try_cmpxchg(_ptr, _oldp, _new) \   << 
190 ({ \                                           << 
191         typeof(*(_ptr)) *___op = (_oldp), ___o << 
192         ___r = raw_cmpxchg((_ptr), ___o, (_new << 
193         if (unlikely(___r != ___o)) \          << 
194                 *___op = ___r; \               << 
195         likely(___r == ___o); \                << 
196 })                                             << 
197 #endif                                             71 #endif
198                                                    72 
199 #if defined(arch_try_cmpxchg_acquire)          !!  73 #ifndef arch_cmpxchg64
200 #define raw_try_cmpxchg_acquire arch_try_cmpxc !!  74 #define arch_cmpxchg64(...) \
201 #elif defined(arch_try_cmpxchg_relaxed)        !!  75         __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
202 #define raw_try_cmpxchg_acquire(...) \         << 
203         __atomic_op_acquire(arch_try_cmpxchg,  << 
204 #elif defined(arch_try_cmpxchg)                << 
205 #define raw_try_cmpxchg_acquire arch_try_cmpxc << 
206 #else                                          << 
207 #define raw_try_cmpxchg_acquire(_ptr, _oldp, _ << 
208 ({ \                                           << 
209         typeof(*(_ptr)) *___op = (_oldp), ___o << 
210         ___r = raw_cmpxchg_acquire((_ptr), ___ << 
211         if (unlikely(___r != ___o)) \          << 
212                 *___op = ___r; \               << 
213         likely(___r == ___o); \                << 
214 })                                             << 
215 #endif                                             76 #endif
216                                                    77 
217 #if defined(arch_try_cmpxchg_release)          !!  78 #endif /* arch_cmpxchg64_relaxed */
218 #define raw_try_cmpxchg_release arch_try_cmpxc << 
219 #elif defined(arch_try_cmpxchg_relaxed)        << 
220 #define raw_try_cmpxchg_release(...) \         << 
221         __atomic_op_release(arch_try_cmpxchg,  << 
222 #elif defined(arch_try_cmpxchg)                << 
223 #define raw_try_cmpxchg_release arch_try_cmpxc << 
224 #else                                          << 
225 #define raw_try_cmpxchg_release(_ptr, _oldp, _ << 
226 ({ \                                           << 
227         typeof(*(_ptr)) *___op = (_oldp), ___o << 
228         ___r = raw_cmpxchg_release((_ptr), ___ << 
229         if (unlikely(___r != ___o)) \          << 
230                 *___op = ___r; \               << 
231         likely(___r == ___o); \                << 
232 })                                             << 
233 #endif                                         << 
234                                                    79 
235 #if defined(arch_try_cmpxchg_relaxed)          !!  80 #ifndef arch_try_cmpxchg_relaxed
236 #define raw_try_cmpxchg_relaxed arch_try_cmpxc !!  81 #ifdef arch_try_cmpxchg
237 #elif defined(arch_try_cmpxchg)                !!  82 #define arch_try_cmpxchg_acquire arch_try_cmpxchg
238 #define raw_try_cmpxchg_relaxed arch_try_cmpxc !!  83 #define arch_try_cmpxchg_release arch_try_cmpxchg
239 #else                                          !!  84 #define arch_try_cmpxchg_relaxed arch_try_cmpxchg
240 #define raw_try_cmpxchg_relaxed(_ptr, _oldp, _ !!  85 #endif /* arch_try_cmpxchg */
241 ({ \                                           << 
242         typeof(*(_ptr)) *___op = (_oldp), ___o << 
243         ___r = raw_cmpxchg_relaxed((_ptr), ___ << 
244         if (unlikely(___r != ___o)) \          << 
245                 *___op = ___r; \               << 
246         likely(___r == ___o); \                << 
247 })                                             << 
248 #endif                                         << 
249                                                    86 
250 #if defined(arch_try_cmpxchg64)                !!  87 #ifndef arch_try_cmpxchg
251 #define raw_try_cmpxchg64 arch_try_cmpxchg64   !!  88 #define arch_try_cmpxchg(_ptr, _oldp, _new) \
252 #elif defined(arch_try_cmpxchg64_relaxed)      << 
253 #define raw_try_cmpxchg64(...) \               << 
254         __atomic_op_fence(arch_try_cmpxchg64,  << 
255 #else                                          << 
256 #define raw_try_cmpxchg64(_ptr, _oldp, _new) \ << 
257 ({ \                                               89 ({ \
258         typeof(*(_ptr)) *___op = (_oldp), ___o     90         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
259         ___r = raw_cmpxchg64((_ptr), ___o, (_n !!  91         ___r = arch_cmpxchg((_ptr), ___o, (_new)); \
260         if (unlikely(___r != ___o)) \              92         if (unlikely(___r != ___o)) \
261                 *___op = ___r; \                   93                 *___op = ___r; \
262         likely(___r == ___o); \                    94         likely(___r == ___o); \
263 })                                                 95 })
264 #endif                                         !!  96 #endif /* arch_try_cmpxchg */
265                                                    97 
266 #if defined(arch_try_cmpxchg64_acquire)        !!  98 #ifndef arch_try_cmpxchg_acquire
267 #define raw_try_cmpxchg64_acquire arch_try_cmp !!  99 #define arch_try_cmpxchg_acquire(_ptr, _oldp, _new) \
268 #elif defined(arch_try_cmpxchg64_relaxed)      << 
269 #define raw_try_cmpxchg64_acquire(...) \       << 
270         __atomic_op_acquire(arch_try_cmpxchg64 << 
271 #elif defined(arch_try_cmpxchg64)              << 
272 #define raw_try_cmpxchg64_acquire arch_try_cmp << 
273 #else                                          << 
274 #define raw_try_cmpxchg64_acquire(_ptr, _oldp, << 
275 ({ \                                              100 ({ \
276         typeof(*(_ptr)) *___op = (_oldp), ___o    101         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
277         ___r = raw_cmpxchg64_acquire((_ptr), _ !! 102         ___r = arch_cmpxchg_acquire((_ptr), ___o, (_new)); \
278         if (unlikely(___r != ___o)) \             103         if (unlikely(___r != ___o)) \
279                 *___op = ___r; \                  104                 *___op = ___r; \
280         likely(___r == ___o); \                   105         likely(___r == ___o); \
281 })                                                106 })
282 #endif                                         !! 107 #endif /* arch_try_cmpxchg_acquire */
283                                                   108 
284 #if defined(arch_try_cmpxchg64_release)        !! 109 #ifndef arch_try_cmpxchg_release
285 #define raw_try_cmpxchg64_release arch_try_cmp !! 110 #define arch_try_cmpxchg_release(_ptr, _oldp, _new) \
286 #elif defined(arch_try_cmpxchg64_relaxed)      << 
287 #define raw_try_cmpxchg64_release(...) \       << 
288         __atomic_op_release(arch_try_cmpxchg64 << 
289 #elif defined(arch_try_cmpxchg64)              << 
290 #define raw_try_cmpxchg64_release arch_try_cmp << 
291 #else                                          << 
292 #define raw_try_cmpxchg64_release(_ptr, _oldp, << 
293 ({ \                                              111 ({ \
294         typeof(*(_ptr)) *___op = (_oldp), ___o    112         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
295         ___r = raw_cmpxchg64_release((_ptr), _ !! 113         ___r = arch_cmpxchg_release((_ptr), ___o, (_new)); \
296         if (unlikely(___r != ___o)) \             114         if (unlikely(___r != ___o)) \
297                 *___op = ___r; \                  115                 *___op = ___r; \
298         likely(___r == ___o); \                   116         likely(___r == ___o); \
299 })                                                117 })
300 #endif                                         !! 118 #endif /* arch_try_cmpxchg_release */
301                                                   119 
302 #if defined(arch_try_cmpxchg64_relaxed)        !! 120 #ifndef arch_try_cmpxchg_relaxed
303 #define raw_try_cmpxchg64_relaxed arch_try_cmp !! 121 #define arch_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
304 #elif defined(arch_try_cmpxchg64)              << 
305 #define raw_try_cmpxchg64_relaxed arch_try_cmp << 
306 #else                                          << 
307 #define raw_try_cmpxchg64_relaxed(_ptr, _oldp, << 
308 ({ \                                              122 ({ \
309         typeof(*(_ptr)) *___op = (_oldp), ___o    123         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
310         ___r = raw_cmpxchg64_relaxed((_ptr), _ !! 124         ___r = arch_cmpxchg_relaxed((_ptr), ___o, (_new)); \
311         if (unlikely(___r != ___o)) \             125         if (unlikely(___r != ___o)) \
312                 *___op = ___r; \                  126                 *___op = ___r; \
313         likely(___r == ___o); \                   127         likely(___r == ___o); \
314 })                                                128 })
                                                   >> 129 #endif /* arch_try_cmpxchg_relaxed */
                                                   >> 130 
                                                   >> 131 #else /* arch_try_cmpxchg_relaxed */
                                                   >> 132 
                                                   >> 133 #ifndef arch_try_cmpxchg_acquire
                                                   >> 134 #define arch_try_cmpxchg_acquire(...) \
                                                   >> 135         __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
315 #endif                                            136 #endif
316                                                   137 
317 #if defined(arch_try_cmpxchg128)               !! 138 #ifndef arch_try_cmpxchg_release
318 #define raw_try_cmpxchg128 arch_try_cmpxchg128 !! 139 #define arch_try_cmpxchg_release(...) \
319 #elif defined(arch_try_cmpxchg128_relaxed)     !! 140         __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
320 #define raw_try_cmpxchg128(...) \              << 
321         __atomic_op_fence(arch_try_cmpxchg128, << 
322 #else                                          << 
323 #define raw_try_cmpxchg128(_ptr, _oldp, _new)  << 
324 ({ \                                           << 
325         typeof(*(_ptr)) *___op = (_oldp), ___o << 
326         ___r = raw_cmpxchg128((_ptr), ___o, (_ << 
327         if (unlikely(___r != ___o)) \          << 
328                 *___op = ___r; \               << 
329         likely(___r == ___o); \                << 
330 })                                             << 
331 #endif                                            141 #endif
332                                                   142 
333 #if defined(arch_try_cmpxchg128_acquire)       !! 143 #ifndef arch_try_cmpxchg
334 #define raw_try_cmpxchg128_acquire arch_try_cm !! 144 #define arch_try_cmpxchg(...) \
335 #elif defined(arch_try_cmpxchg128_relaxed)     !! 145         __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
336 #define raw_try_cmpxchg128_acquire(...) \      << 
337         __atomic_op_acquire(arch_try_cmpxchg12 << 
338 #elif defined(arch_try_cmpxchg128)             << 
339 #define raw_try_cmpxchg128_acquire arch_try_cm << 
340 #else                                          << 
341 #define raw_try_cmpxchg128_acquire(_ptr, _oldp << 
342 ({ \                                           << 
343         typeof(*(_ptr)) *___op = (_oldp), ___o << 
344         ___r = raw_cmpxchg128_acquire((_ptr),  << 
345         if (unlikely(___r != ___o)) \          << 
346                 *___op = ___r; \               << 
347         likely(___r == ___o); \                << 
348 })                                             << 
349 #endif                                            146 #endif
350                                                   147 
351 #if defined(arch_try_cmpxchg128_release)       !! 148 #endif /* arch_try_cmpxchg_relaxed */
352 #define raw_try_cmpxchg128_release arch_try_cm !! 149 
353 #elif defined(arch_try_cmpxchg128_relaxed)     !! 150 #ifndef arch_try_cmpxchg64_relaxed
354 #define raw_try_cmpxchg128_release(...) \      !! 151 #ifdef arch_try_cmpxchg64
355         __atomic_op_release(arch_try_cmpxchg12 !! 152 #define arch_try_cmpxchg64_acquire arch_try_cmpxchg64
356 #elif defined(arch_try_cmpxchg128)             !! 153 #define arch_try_cmpxchg64_release arch_try_cmpxchg64
357 #define raw_try_cmpxchg128_release arch_try_cm !! 154 #define arch_try_cmpxchg64_relaxed arch_try_cmpxchg64
358 #else                                          !! 155 #endif /* arch_try_cmpxchg64 */
359 #define raw_try_cmpxchg128_release(_ptr, _oldp !! 156 
                                                   >> 157 #ifndef arch_try_cmpxchg64
                                                   >> 158 #define arch_try_cmpxchg64(_ptr, _oldp, _new) \
360 ({ \                                              159 ({ \
361         typeof(*(_ptr)) *___op = (_oldp), ___o    160         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
362         ___r = raw_cmpxchg128_release((_ptr),  !! 161         ___r = arch_cmpxchg64((_ptr), ___o, (_new)); \
363         if (unlikely(___r != ___o)) \             162         if (unlikely(___r != ___o)) \
364                 *___op = ___r; \                  163                 *___op = ___r; \
365         likely(___r == ___o); \                   164         likely(___r == ___o); \
366 })                                                165 })
367 #endif                                         !! 166 #endif /* arch_try_cmpxchg64 */
368                                                   167 
369 #if defined(arch_try_cmpxchg128_relaxed)       !! 168 #ifndef arch_try_cmpxchg64_acquire
370 #define raw_try_cmpxchg128_relaxed arch_try_cm !! 169 #define arch_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
371 #elif defined(arch_try_cmpxchg128)             << 
372 #define raw_try_cmpxchg128_relaxed arch_try_cm << 
373 #else                                          << 
374 #define raw_try_cmpxchg128_relaxed(_ptr, _oldp << 
375 ({ \                                              170 ({ \
376         typeof(*(_ptr)) *___op = (_oldp), ___o    171         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
377         ___r = raw_cmpxchg128_relaxed((_ptr),  !! 172         ___r = arch_cmpxchg64_acquire((_ptr), ___o, (_new)); \
378         if (unlikely(___r != ___o)) \             173         if (unlikely(___r != ___o)) \
379                 *___op = ___r; \                  174                 *___op = ___r; \
380         likely(___r == ___o); \                   175         likely(___r == ___o); \
381 })                                                176 })
382 #endif                                         !! 177 #endif /* arch_try_cmpxchg64_acquire */
383                                                << 
384 #define raw_cmpxchg_local arch_cmpxchg_local   << 
385                                                   178 
386 #ifdef arch_try_cmpxchg_local                  !! 179 #ifndef arch_try_cmpxchg64_release
387 #define raw_try_cmpxchg_local arch_try_cmpxchg !! 180 #define arch_try_cmpxchg64_release(_ptr, _oldp, _new) \
388 #else                                          << 
389 #define raw_try_cmpxchg_local(_ptr, _oldp, _ne << 
390 ({ \                                              181 ({ \
391         typeof(*(_ptr)) *___op = (_oldp), ___o    182         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
392         ___r = raw_cmpxchg_local((_ptr), ___o, !! 183         ___r = arch_cmpxchg64_release((_ptr), ___o, (_new)); \
393         if (unlikely(___r != ___o)) \             184         if (unlikely(___r != ___o)) \
394                 *___op = ___r; \                  185                 *___op = ___r; \
395         likely(___r == ___o); \                   186         likely(___r == ___o); \
396 })                                                187 })
397 #endif                                         !! 188 #endif /* arch_try_cmpxchg64_release */
398                                                   189 
399 #define raw_cmpxchg64_local arch_cmpxchg64_loc !! 190 #ifndef arch_try_cmpxchg64_relaxed
400                                                !! 191 #define arch_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
401 #ifdef arch_try_cmpxchg64_local                << 
402 #define raw_try_cmpxchg64_local arch_try_cmpxc << 
403 #else                                          << 
404 #define raw_try_cmpxchg64_local(_ptr, _oldp, _ << 
405 ({ \                                              192 ({ \
406         typeof(*(_ptr)) *___op = (_oldp), ___o    193         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
407         ___r = raw_cmpxchg64_local((_ptr), ___ !! 194         ___r = arch_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
408         if (unlikely(___r != ___o)) \             195         if (unlikely(___r != ___o)) \
409                 *___op = ___r; \                  196                 *___op = ___r; \
410         likely(___r == ___o); \                   197         likely(___r == ___o); \
411 })                                                198 })
412 #endif                                         !! 199 #endif /* arch_try_cmpxchg64_relaxed */
413                                                   200 
414 #define raw_cmpxchg128_local arch_cmpxchg128_l !! 201 #else /* arch_try_cmpxchg64_relaxed */
415                                                   202 
416 #ifdef arch_try_cmpxchg128_local               !! 203 #ifndef arch_try_cmpxchg64_acquire
417 #define raw_try_cmpxchg128_local arch_try_cmpx !! 204 #define arch_try_cmpxchg64_acquire(...) \
418 #else                                          !! 205         __atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
419 #define raw_try_cmpxchg128_local(_ptr, _oldp,  << 
420 ({ \                                           << 
421         typeof(*(_ptr)) *___op = (_oldp), ___o << 
422         ___r = raw_cmpxchg128_local((_ptr), __ << 
423         if (unlikely(___r != ___o)) \          << 
424                 *___op = ___r; \               << 
425         likely(___r == ___o); \                << 
426 })                                             << 
427 #endif                                            206 #endif
428                                                   207 
429 #define raw_sync_cmpxchg arch_sync_cmpxchg     !! 208 #ifndef arch_try_cmpxchg64_release
                                                   >> 209 #define arch_try_cmpxchg64_release(...) \
                                                   >> 210         __atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
                                                   >> 211 #endif
430                                                   212 
431 #ifdef arch_sync_try_cmpxchg                   !! 213 #ifndef arch_try_cmpxchg64
432 #define raw_sync_try_cmpxchg arch_sync_try_cmp !! 214 #define arch_try_cmpxchg64(...) \
433 #else                                          !! 215         __atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
434 #define raw_sync_try_cmpxchg(_ptr, _oldp, _new << 
435 ({ \                                           << 
436         typeof(*(_ptr)) *___op = (_oldp), ___o << 
437         ___r = raw_sync_cmpxchg((_ptr), ___o,  << 
438         if (unlikely(___r != ___o)) \          << 
439                 *___op = ___r; \               << 
440         likely(___r == ___o); \                << 
441 })                                             << 
442 #endif                                            216 #endif
443                                                   217 
444 /**                                            !! 218 #endif /* arch_try_cmpxchg64_relaxed */
445  * raw_atomic_read() - atomic load with relaxe << 
446  * @v: pointer to atomic_t                     << 
447  *                                             << 
448  * Atomically loads the value of @v with relax << 
449  *                                             << 
450  * Safe to use in noinstr code; prefer atomic_ << 
451  *                                             << 
452  * Return: The value loaded from @v.           << 
453  */                                            << 
454 static __always_inline int                     << 
455 raw_atomic_read(const atomic_t *v)             << 
456 {                                              << 
457         return arch_atomic_read(v);            << 
458 }                                              << 
459                                                   219 
460 /**                                            !! 220 #ifndef arch_atomic_read_acquire
461  * raw_atomic_read_acquire() - atomic load wit << 
462  * @v: pointer to atomic_t                     << 
463  *                                             << 
464  * Atomically loads the value of @v with acqui << 
465  *                                             << 
466  * Safe to use in noinstr code; prefer atomic_ << 
467  *                                             << 
468  * Return: The value loaded from @v.           << 
469  */                                            << 
470 static __always_inline int                        221 static __always_inline int
471 raw_atomic_read_acquire(const atomic_t *v)     !! 222 arch_atomic_read_acquire(const atomic_t *v)
472 {                                                 223 {
473 #if defined(arch_atomic_read_acquire)          << 
474         return arch_atomic_read_acquire(v);    << 
475 #else                                          << 
476         int ret;                                  224         int ret;
477                                                   225 
478         if (__native_word(atomic_t)) {            226         if (__native_word(atomic_t)) {
479                 ret = smp_load_acquire(&(v)->c    227                 ret = smp_load_acquire(&(v)->counter);
480         } else {                                  228         } else {
481                 ret = raw_atomic_read(v);      !! 229                 ret = arch_atomic_read(v);
482                 __atomic_acquire_fence();         230                 __atomic_acquire_fence();
483         }                                         231         }
484                                                   232 
485         return ret;                               233         return ret;
486 #endif                                         << 
487 }                                                 234 }
                                                   >> 235 #define arch_atomic_read_acquire arch_atomic_read_acquire
                                                   >> 236 #endif
488                                                   237 
489 /**                                            !! 238 #ifndef arch_atomic_set_release
490  * raw_atomic_set() - atomic set with relaxed  << 
491  * @v: pointer to atomic_t                     << 
492  * @i: int value to assign                     << 
493  *                                             << 
494  * Atomically sets @v to @i with relaxed order << 
495  *                                             << 
496  * Safe to use in noinstr code; prefer atomic_ << 
497  *                                             << 
498  * Return: Nothing.                            << 
499  */                                            << 
500 static __always_inline void                       239 static __always_inline void
501 raw_atomic_set(atomic_t *v, int i)             !! 240 arch_atomic_set_release(atomic_t *v, int i)
502 {                                                 241 {
503         arch_atomic_set(v, i);                 << 
504 }                                              << 
505                                                << 
506 /**                                            << 
507  * raw_atomic_set_release() - atomic set with  << 
508  * @v: pointer to atomic_t                     << 
509  * @i: int value to assign                     << 
510  *                                             << 
511  * Atomically sets @v to @i with release order << 
512  *                                             << 
513  * Safe to use in noinstr code; prefer atomic_ << 
514  *                                             << 
515  * Return: Nothing.                            << 
516  */                                            << 
517 static __always_inline void                    << 
518 raw_atomic_set_release(atomic_t *v, int i)     << 
519 {                                              << 
520 #if defined(arch_atomic_set_release)           << 
521         arch_atomic_set_release(v, i);         << 
522 #else                                          << 
523         if (__native_word(atomic_t)) {            242         if (__native_word(atomic_t)) {
524                 smp_store_release(&(v)->counte    243                 smp_store_release(&(v)->counter, i);
525         } else {                                  244         } else {
526                 __atomic_release_fence();         245                 __atomic_release_fence();
527                 raw_atomic_set(v, i);          !! 246                 arch_atomic_set(v, i);
528         }                                         247         }
                                                   >> 248 }
                                                   >> 249 #define arch_atomic_set_release arch_atomic_set_release
529 #endif                                            250 #endif
                                                   >> 251 
                                                   >> 252 #ifndef arch_atomic_add_return_relaxed
                                                   >> 253 #define arch_atomic_add_return_acquire arch_atomic_add_return
                                                   >> 254 #define arch_atomic_add_return_release arch_atomic_add_return
                                                   >> 255 #define arch_atomic_add_return_relaxed arch_atomic_add_return
                                                   >> 256 #else /* arch_atomic_add_return_relaxed */
                                                   >> 257 
                                                   >> 258 #ifndef arch_atomic_add_return_acquire
                                                   >> 259 static __always_inline int
                                                   >> 260 arch_atomic_add_return_acquire(int i, atomic_t *v)
                                                   >> 261 {
                                                   >> 262         int ret = arch_atomic_add_return_relaxed(i, v);
                                                   >> 263         __atomic_acquire_fence();
                                                   >> 264         return ret;
530 }                                                 265 }
                                                   >> 266 #define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
                                                   >> 267 #endif
531                                                   268 
532 /**                                            !! 269 #ifndef arch_atomic_add_return_release
533  * raw_atomic_add() - atomic add with relaxed  !! 270 static __always_inline int
534  * @i: int value to add                        !! 271 arch_atomic_add_return_release(int i, atomic_t *v)
535  * @v: pointer to atomic_t                     << 
536  *                                             << 
537  * Atomically updates @v to (@v + @i) with rel << 
538  *                                             << 
539  * Safe to use in noinstr code; prefer atomic_ << 
540  *                                             << 
541  * Return: Nothing.                            << 
542  */                                            << 
543 static __always_inline void                    << 
544 raw_atomic_add(int i, atomic_t *v)             << 
545 {                                                 272 {
546         arch_atomic_add(i, v);                 !! 273         __atomic_release_fence();
                                                   >> 274         return arch_atomic_add_return_relaxed(i, v);
547 }                                                 275 }
                                                   >> 276 #define arch_atomic_add_return_release arch_atomic_add_return_release
                                                   >> 277 #endif
548                                                   278 
549 /**                                            !! 279 #ifndef arch_atomic_add_return
550  * raw_atomic_add_return() - atomic add with f << 
551  * @i: int value to add                        << 
552  * @v: pointer to atomic_t                     << 
553  *                                             << 
554  * Atomically updates @v to (@v + @i) with ful << 
555  *                                             << 
556  * Safe to use in noinstr code; prefer atomic_ << 
557  *                                             << 
558  * Return: The updated value of @v.            << 
559  */                                            << 
560 static __always_inline int                        280 static __always_inline int
561 raw_atomic_add_return(int i, atomic_t *v)      !! 281 arch_atomic_add_return(int i, atomic_t *v)
562 {                                                 282 {
563 #if defined(arch_atomic_add_return)            << 
564         return arch_atomic_add_return(i, v);   << 
565 #elif defined(arch_atomic_add_return_relaxed)  << 
566         int ret;                                  283         int ret;
567         __atomic_pre_full_fence();                284         __atomic_pre_full_fence();
568         ret = arch_atomic_add_return_relaxed(i    285         ret = arch_atomic_add_return_relaxed(i, v);
569         __atomic_post_full_fence();               286         __atomic_post_full_fence();
570         return ret;                               287         return ret;
571 #else                                          << 
572 #error "Unable to define raw_atomic_add_return << 
573 #endif                                         << 
574 }                                                 288 }
                                                   >> 289 #define arch_atomic_add_return arch_atomic_add_return
                                                   >> 290 #endif
575                                                   291 
576 /**                                            !! 292 #endif /* arch_atomic_add_return_relaxed */
577  * raw_atomic_add_return_acquire() - atomic ad !! 293 
578  * @i: int value to add                        !! 294 #ifndef arch_atomic_fetch_add_relaxed
579  * @v: pointer to atomic_t                     !! 295 #define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
580  *                                             !! 296 #define arch_atomic_fetch_add_release arch_atomic_fetch_add
581  * Atomically updates @v to (@v + @i) with acq !! 297 #define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add
582  *                                             !! 298 #else /* arch_atomic_fetch_add_relaxed */
583  * Safe to use in noinstr code; prefer atomic_ !! 299 
584  *                                             !! 300 #ifndef arch_atomic_fetch_add_acquire
585  * Return: The updated value of @v.            << 
586  */                                            << 
587 static __always_inline int                        301 static __always_inline int
588 raw_atomic_add_return_acquire(int i, atomic_t  !! 302 arch_atomic_fetch_add_acquire(int i, atomic_t *v)
589 {                                                 303 {
590 #if defined(arch_atomic_add_return_acquire)    !! 304         int ret = arch_atomic_fetch_add_relaxed(i, v);
591         return arch_atomic_add_return_acquire( << 
592 #elif defined(arch_atomic_add_return_relaxed)  << 
593         int ret = arch_atomic_add_return_relax << 
594         __atomic_acquire_fence();                 305         __atomic_acquire_fence();
595         return ret;                               306         return ret;
596 #elif defined(arch_atomic_add_return)          << 
597         return arch_atomic_add_return(i, v);   << 
598 #else                                          << 
599 #error "Unable to define raw_atomic_add_return << 
600 #endif                                         << 
601 }                                                 307 }
                                                   >> 308 #define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
                                                   >> 309 #endif
602                                                   310 
603 /**                                            !! 311 #ifndef arch_atomic_fetch_add_release
604  * raw_atomic_add_return_release() - atomic ad << 
605  * @i: int value to add                        << 
606  * @v: pointer to atomic_t                     << 
607  *                                             << 
608  * Atomically updates @v to (@v + @i) with rel << 
609  *                                             << 
610  * Safe to use in noinstr code; prefer atomic_ << 
611  *                                             << 
612  * Return: The updated value of @v.            << 
613  */                                            << 
614 static __always_inline int                        312 static __always_inline int
615 raw_atomic_add_return_release(int i, atomic_t  !! 313 arch_atomic_fetch_add_release(int i, atomic_t *v)
616 {                                                 314 {
617 #if defined(arch_atomic_add_return_release)    << 
618         return arch_atomic_add_return_release( << 
619 #elif defined(arch_atomic_add_return_relaxed)  << 
620         __atomic_release_fence();                 315         __atomic_release_fence();
621         return arch_atomic_add_return_relaxed( !! 316         return arch_atomic_fetch_add_relaxed(i, v);
622 #elif defined(arch_atomic_add_return)          << 
623         return arch_atomic_add_return(i, v);   << 
624 #else                                          << 
625 #error "Unable to define raw_atomic_add_return << 
626 #endif                                         << 
627 }                                                 317 }
628                                                !! 318 #define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
629 /**                                            << 
630  * raw_atomic_add_return_relaxed() - atomic ad << 
631  * @i: int value to add                        << 
632  * @v: pointer to atomic_t                     << 
633  *                                             << 
634  * Atomically updates @v to (@v + @i) with rel << 
635  *                                             << 
636  * Safe to use in noinstr code; prefer atomic_ << 
637  *                                             << 
638  * Return: The updated value of @v.            << 
639  */                                            << 
640 static __always_inline int                     << 
641 raw_atomic_add_return_relaxed(int i, atomic_t  << 
642 {                                              << 
643 #if defined(arch_atomic_add_return_relaxed)    << 
644         return arch_atomic_add_return_relaxed( << 
645 #elif defined(arch_atomic_add_return)          << 
646         return arch_atomic_add_return(i, v);   << 
647 #else                                          << 
648 #error "Unable to define raw_atomic_add_return << 
649 #endif                                            319 #endif
650 }                                              << 
651                                                   320 
652 /**                                            !! 321 #ifndef arch_atomic_fetch_add
653  * raw_atomic_fetch_add() - atomic add with fu << 
654  * @i: int value to add                        << 
655  * @v: pointer to atomic_t                     << 
656  *                                             << 
657  * Atomically updates @v to (@v + @i) with ful << 
658  *                                             << 
659  * Safe to use in noinstr code; prefer atomic_ << 
660  *                                             << 
661  * Return: The original value of @v.           << 
662  */                                            << 
663 static __always_inline int                        322 static __always_inline int
664 raw_atomic_fetch_add(int i, atomic_t *v)       !! 323 arch_atomic_fetch_add(int i, atomic_t *v)
665 {                                                 324 {
666 #if defined(arch_atomic_fetch_add)             << 
667         return arch_atomic_fetch_add(i, v);    << 
668 #elif defined(arch_atomic_fetch_add_relaxed)   << 
669         int ret;                                  325         int ret;
670         __atomic_pre_full_fence();                326         __atomic_pre_full_fence();
671         ret = arch_atomic_fetch_add_relaxed(i,    327         ret = arch_atomic_fetch_add_relaxed(i, v);
672         __atomic_post_full_fence();               328         __atomic_post_full_fence();
673         return ret;                               329         return ret;
674 #else                                          << 
675 #error "Unable to define raw_atomic_fetch_add" << 
676 #endif                                         << 
677 }                                                 330 }
                                                   >> 331 #define arch_atomic_fetch_add arch_atomic_fetch_add
                                                   >> 332 #endif
678                                                   333 
679 /**                                            !! 334 #endif /* arch_atomic_fetch_add_relaxed */
680  * raw_atomic_fetch_add_acquire() - atomic add !! 335 
681  * @i: int value to add                        !! 336 #ifndef arch_atomic_sub_return_relaxed
682  * @v: pointer to atomic_t                     !! 337 #define arch_atomic_sub_return_acquire arch_atomic_sub_return
683  *                                             !! 338 #define arch_atomic_sub_return_release arch_atomic_sub_return
684  * Atomically updates @v to (@v + @i) with acq !! 339 #define arch_atomic_sub_return_relaxed arch_atomic_sub_return
685  *                                             !! 340 #else /* arch_atomic_sub_return_relaxed */
686  * Safe to use in noinstr code; prefer atomic_ !! 341 
687  *                                             !! 342 #ifndef arch_atomic_sub_return_acquire
688  * Return: The original value of @v.           << 
689  */                                            << 
690 static __always_inline int                        343 static __always_inline int
691 raw_atomic_fetch_add_acquire(int i, atomic_t * !! 344 arch_atomic_sub_return_acquire(int i, atomic_t *v)
692 {                                                 345 {
693 #if defined(arch_atomic_fetch_add_acquire)     !! 346         int ret = arch_atomic_sub_return_relaxed(i, v);
694         return arch_atomic_fetch_add_acquire(i << 
695 #elif defined(arch_atomic_fetch_add_relaxed)   << 
696         int ret = arch_atomic_fetch_add_relaxe << 
697         __atomic_acquire_fence();                 347         __atomic_acquire_fence();
698         return ret;                               348         return ret;
699 #elif defined(arch_atomic_fetch_add)           << 
700         return arch_atomic_fetch_add(i, v);    << 
701 #else                                          << 
702 #error "Unable to define raw_atomic_fetch_add_ << 
703 #endif                                         << 
704 }                                                 349 }
                                                   >> 350 #define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
                                                   >> 351 #endif
705                                                   352 
706 /**                                            !! 353 #ifndef arch_atomic_sub_return_release
707  * raw_atomic_fetch_add_release() - atomic add << 
708  * @i: int value to add                        << 
709  * @v: pointer to atomic_t                     << 
710  *                                             << 
711  * Atomically updates @v to (@v + @i) with rel << 
712  *                                             << 
713  * Safe to use in noinstr code; prefer atomic_ << 
714  *                                             << 
715  * Return: The original value of @v.           << 
716  */                                            << 
717 static __always_inline int                        354 static __always_inline int
718 raw_atomic_fetch_add_release(int i, atomic_t * !! 355 arch_atomic_sub_return_release(int i, atomic_t *v)
719 {                                                 356 {
720 #if defined(arch_atomic_fetch_add_release)     << 
721         return arch_atomic_fetch_add_release(i << 
722 #elif defined(arch_atomic_fetch_add_relaxed)   << 
723         __atomic_release_fence();                 357         __atomic_release_fence();
724         return arch_atomic_fetch_add_relaxed(i !! 358         return arch_atomic_sub_return_relaxed(i, v);
725 #elif defined(arch_atomic_fetch_add)           << 
726         return arch_atomic_fetch_add(i, v);    << 
727 #else                                          << 
728 #error "Unable to define raw_atomic_fetch_add_ << 
729 #endif                                         << 
730 }                                                 359 }
731                                                !! 360 #define arch_atomic_sub_return_release arch_atomic_sub_return_release
732 /**                                            << 
733  * raw_atomic_fetch_add_relaxed() - atomic add with relaxed ordering << 
734  * @i: int value to add                        << 
735  * @v: pointer to atomic_t                     << 
736  *                                             << 
737  * Atomically updates @v to (@v + @i) with relaxed ordering. << 
738  *                                             << 
739  * Safe to use in noinstr code; prefer atomic_fetch_add_relaxed() elsewhere. << 
740  *                                             << 
741  * Return: The original value of @v.           << 
742  */                                            << 
743 static __always_inline int                     << 
744 raw_atomic_fetch_add_relaxed(int i, atomic_t *v) << 
745 {                                              << 
746 #if defined(arch_atomic_fetch_add_relaxed)     << 
747         return arch_atomic_fetch_add_relaxed(i, v); << 
748 #elif defined(arch_atomic_fetch_add)           << 
749         return arch_atomic_fetch_add(i, v);    << 
750 #else                                          << 
751 #error "Unable to define raw_atomic_fetch_add_relaxed" << 
752 #endif                                            361 #endif
753 }                                              << 
754                                                << 
755 /**                                            << 
756  * raw_atomic_sub() - atomic subtract with relaxed ordering << 
757  * @i: int value to subtract                   << 
758  * @v: pointer to atomic_t                     << 
759  *                                             << 
760  * Atomically updates @v to (@v - @i) with relaxed ordering. << 
761  *                                             << 
762  * Safe to use in noinstr code; prefer atomic_sub() elsewhere. << 
763  *                                             << 
764  * Return: Nothing.                            << 
765  */                                            << 
766 static __always_inline void                    << 
767 raw_atomic_sub(int i, atomic_t *v)             << 
768 {                                              << 
769         arch_atomic_sub(i, v);                 << 
770 }                                              << 
771                                                   362 
772 /**                                            !! 363 #ifndef arch_atomic_sub_return
773  * raw_atomic_sub_return() - atomic subtract with full ordering << 
774  * @i: int value to subtract                   << 
775  * @v: pointer to atomic_t                     << 
776  *                                             << 
777  * Atomically updates @v to (@v - @i) with full ordering. << 
778  *                                             << 
779  * Safe to use in noinstr code; prefer atomic_sub_return() elsewhere. << 
780  *                                             << 
781  * Return: The updated value of @v.            << 
782  */                                            << 
783 static __always_inline int                        364 static __always_inline int
784 raw_atomic_sub_return(int i, atomic_t *v)      !! 365 arch_atomic_sub_return(int i, atomic_t *v)
785 {                                                 366 {
786 #if defined(arch_atomic_sub_return)            << 
787         return arch_atomic_sub_return(i, v);   << 
788 #elif defined(arch_atomic_sub_return_relaxed)  << 
789         int ret;                                  367         int ret;
790         __atomic_pre_full_fence();                368         __atomic_pre_full_fence();
791         ret = arch_atomic_sub_return_relaxed(i, v); 369         ret = arch_atomic_sub_return_relaxed(i, v);
792         __atomic_post_full_fence();               370         __atomic_post_full_fence();
793         return ret;                               371         return ret;
794 #else                                          << 
795 #error "Unable to define raw_atomic_sub_return" << 
796 #endif                                         << 
797 }                                                 372 }
                                                   >> 373 #define arch_atomic_sub_return arch_atomic_sub_return
                                                   >> 374 #endif
798                                                   375 
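For the fully ordered *_return fallback just shown, the relaxed operation is bracketed by __atomic_pre_full_fence()/__atomic_post_full_fence(). The sketch below only approximates this with C11 seq_cst fences, since the kernel fences are architecture-defined; it also illustrates that a *_return form yields the updated value:

#include <stdatomic.h>

/* Illustrative only: full-ordering fallback as fence / relaxed RMW / fence.
 * seq_cst fences stand in for the kernel's arch-specific full fences,
 * and the "- i" turns the fetched (old) value into the updated one. */
static inline int my_sub_return(atomic_int *v, int i)
{
	int ret;

	atomic_thread_fence(memory_order_seq_cst);
	ret = atomic_fetch_sub_explicit(v, i, memory_order_relaxed) - i;
	atomic_thread_fence(memory_order_seq_cst);
	return ret;
}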
799 /**                                            !! 376 #endif /* arch_atomic_sub_return_relaxed */
800  * raw_atomic_sub_return_acquire() - atomic su !! 377 
801  * @i: int value to subtract                   !! 378 #ifndef arch_atomic_fetch_sub_relaxed
802  * @v: pointer to atomic_t                     !! 379 #define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
803  *                                             !! 380 #define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
804  * Atomically updates @v to (@v - @i) with acq !! 381 #define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub
805  *                                             !! 382 #else /* arch_atomic_fetch_sub_relaxed */
806  * Safe to use in noinstr code; prefer atomic_ !! 383 
807  *                                             !! 384 #ifndef arch_atomic_fetch_sub_acquire
808  * Return: The updated value of @v.            << 
809  */                                            << 
810 static __always_inline int                        385 static __always_inline int
811 raw_atomic_sub_return_acquire(int i, atomic_t  !! 386 arch_atomic_fetch_sub_acquire(int i, atomic_t *v)
812 {                                                 387 {
813 #if defined(arch_atomic_sub_return_acquire)    !! 388         int ret = arch_atomic_fetch_sub_relaxed(i, v);
814         return arch_atomic_sub_return_acquire( << 
815 #elif defined(arch_atomic_sub_return_relaxed)  << 
816         int ret = arch_atomic_sub_return_relax << 
817         __atomic_acquire_fence();                 389         __atomic_acquire_fence();
818         return ret;                               390         return ret;
819 #elif defined(arch_atomic_sub_return)          << 
820         return arch_atomic_sub_return(i, v);   << 
821 #else                                          << 
822 #error "Unable to define raw_atomic_sub_return << 
823 #endif                                         << 
824 }                                                 391 }
                                                   >> 392 #define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
                                                   >> 393 #endif
825                                                   394 
826 /**                                            !! 395 #ifndef arch_atomic_fetch_sub_release
827  * raw_atomic_sub_return_release() - atomic su << 
828  * @i: int value to subtract                   << 
829  * @v: pointer to atomic_t                     << 
830  *                                             << 
831  * Atomically updates @v to (@v - @i) with rel << 
832  *                                             << 
833  * Safe to use in noinstr code; prefer atomic_ << 
834  *                                             << 
835  * Return: The updated value of @v.            << 
836  */                                            << 
837 static __always_inline int                        396 static __always_inline int
838 raw_atomic_sub_return_release(int i, atomic_t  !! 397 arch_atomic_fetch_sub_release(int i, atomic_t *v)
839 {                                                 398 {
840 #if defined(arch_atomic_sub_return_release)    << 
841         return arch_atomic_sub_return_release( << 
842 #elif defined(arch_atomic_sub_return_relaxed)  << 
843         __atomic_release_fence();                 399         __atomic_release_fence();
844         return arch_atomic_sub_return_relaxed( !! 400         return arch_atomic_fetch_sub_relaxed(i, v);
845 #elif defined(arch_atomic_sub_return)          << 
846         return arch_atomic_sub_return(i, v);   << 
847 #else                                          << 
848 #error "Unable to define raw_atomic_sub_return << 
849 #endif                                         << 
850 }                                                 401 }
851                                                !! 402 #define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
852 /**                                            << 
853  * raw_atomic_sub_return_relaxed() - atomic su << 
854  * @i: int value to subtract                   << 
855  * @v: pointer to atomic_t                     << 
856  *                                             << 
857  * Atomically updates @v to (@v - @i) with rel << 
858  *                                             << 
859  * Safe to use in noinstr code; prefer atomic_ << 
860  *                                             << 
861  * Return: The updated value of @v.            << 
862  */                                            << 
863 static __always_inline int                     << 
864 raw_atomic_sub_return_relaxed(int i, atomic_t  << 
865 {                                              << 
866 #if defined(arch_atomic_sub_return_relaxed)    << 
867         return arch_atomic_sub_return_relaxed( << 
868 #elif defined(arch_atomic_sub_return)          << 
869         return arch_atomic_sub_return(i, v);   << 
870 #else                                          << 
871 #error "Unable to define raw_atomic_sub_return << 
872 #endif                                            403 #endif
873 }                                              << 
874                                                   404 
875 /**                                            !! 405 #ifndef arch_atomic_fetch_sub
876  * raw_atomic_fetch_sub() - atomic subtract wi << 
877  * @i: int value to subtract                   << 
878  * @v: pointer to atomic_t                     << 
879  *                                             << 
880  * Atomically updates @v to (@v - @i) with ful << 
881  *                                             << 
882  * Safe to use in noinstr code; prefer atomic_ << 
883  *                                             << 
884  * Return: The original value of @v.           << 
885  */                                            << 
886 static __always_inline int                        406 static __always_inline int
887 raw_atomic_fetch_sub(int i, atomic_t *v)       !! 407 arch_atomic_fetch_sub(int i, atomic_t *v)
888 {                                                 408 {
889 #if defined(arch_atomic_fetch_sub)             << 
890         return arch_atomic_fetch_sub(i, v);    << 
891 #elif defined(arch_atomic_fetch_sub_relaxed)   << 
892         int ret;                                  409         int ret;
893         __atomic_pre_full_fence();                410         __atomic_pre_full_fence();
894         ret = arch_atomic_fetch_sub_relaxed(i,    411         ret = arch_atomic_fetch_sub_relaxed(i, v);
895         __atomic_post_full_fence();               412         __atomic_post_full_fence();
896         return ret;                               413         return ret;
897 #else                                          !! 414 }
898 #error "Unable to define raw_atomic_fetch_sub" !! 415 #define arch_atomic_fetch_sub arch_atomic_fetch_sub
899 #endif                                            416 #endif
                                                   >> 417 
                                                   >> 418 #endif /* arch_atomic_fetch_sub_relaxed */
                                                   >> 419 
                                                   >> 420 #ifndef arch_atomic_inc
                                                   >> 421 static __always_inline void
                                                   >> 422 arch_atomic_inc(atomic_t *v)
                                                   >> 423 {
                                                   >> 424         arch_atomic_add(1, v);
900 }                                                 425 }
                                                   >> 426 #define arch_atomic_inc arch_atomic_inc
                                                   >> 427 #endif
901                                                   428 
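In the 6.2.16 column, each fallback ends with a line such as "#define arch_atomic_fetch_sub arch_atomic_fetch_sub". Defining a macro to its own name is a feature-detection idiom: later #ifdef tests then see the fallback exactly as if the architecture had provided the operation. A tiny standalone sketch of the idiom (my_op is invented, not a kernel identifier):

/* Illustrative preprocessor sketch of the self-define idiom. */
#ifndef my_op
static inline int my_op(int x)
{
	return x + 1;
}
#define my_op my_op
#endif

#ifdef my_op	/* true whether the arch or the fallback above provided it */
/* ... callers may rely on my_op() here ... */
#endif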
902 /**                                            !! 429 #ifndef arch_atomic_inc_return_relaxed
903  * raw_atomic_fetch_sub_acquire() - atomic sub !! 430 #ifdef arch_atomic_inc_return
904  * @i: int value to subtract                   !! 431 #define arch_atomic_inc_return_acquire arch_atomic_inc_return
905  * @v: pointer to atomic_t                     !! 432 #define arch_atomic_inc_return_release arch_atomic_inc_return
906  *                                             !! 433 #define arch_atomic_inc_return_relaxed arch_atomic_inc_return
907  * Atomically updates @v to (@v - @i) with acq !! 434 #endif /* arch_atomic_inc_return */
908  *                                             !! 435 
909  * Safe to use in noinstr code; prefer atomic_ !! 436 #ifndef arch_atomic_inc_return
910  *                                             << 
911  * Return: The original value of @v.           << 
912  */                                            << 
913 static __always_inline int                        437 static __always_inline int
914 raw_atomic_fetch_sub_acquire(int i, atomic_t * !! 438 arch_atomic_inc_return(atomic_t *v)
915 {                                                 439 {
916 #if defined(arch_atomic_fetch_sub_acquire)     !! 440         return arch_atomic_add_return(1, v);
917         return arch_atomic_fetch_sub_acquire(i << 
918 #elif defined(arch_atomic_fetch_sub_relaxed)   << 
919         int ret = arch_atomic_fetch_sub_relaxe << 
920         __atomic_acquire_fence();              << 
921         return ret;                            << 
922 #elif defined(arch_atomic_fetch_sub)           << 
923         return arch_atomic_fetch_sub(i, v);    << 
924 #else                                          << 
925 #error "Unable to define raw_atomic_fetch_sub_ << 
926 #endif                                         << 
927 }                                                 441 }
                                                   >> 442 #define arch_atomic_inc_return arch_atomic_inc_return
                                                   >> 443 #endif
928                                                   444 
929 /**                                            !! 445 #ifndef arch_atomic_inc_return_acquire
930  * raw_atomic_fetch_sub_release() - atomic sub << 
931  * @i: int value to subtract                   << 
932  * @v: pointer to atomic_t                     << 
933  *                                             << 
934  * Atomically updates @v to (@v - @i) with rel << 
935  *                                             << 
936  * Safe to use in noinstr code; prefer atomic_ << 
937  *                                             << 
938  * Return: The original value of @v.           << 
939  */                                            << 
940 static __always_inline int                        446 static __always_inline int
941 raw_atomic_fetch_sub_release(int i, atomic_t * !! 447 arch_atomic_inc_return_acquire(atomic_t *v)
942 {                                                 448 {
943 #if defined(arch_atomic_fetch_sub_release)     !! 449         return arch_atomic_add_return_acquire(1, v);
944         return arch_atomic_fetch_sub_release(i << 
945 #elif defined(arch_atomic_fetch_sub_relaxed)   << 
946         __atomic_release_fence();              << 
947         return arch_atomic_fetch_sub_relaxed(i << 
948 #elif defined(arch_atomic_fetch_sub)           << 
949         return arch_atomic_fetch_sub(i, v);    << 
950 #else                                          << 
951 #error "Unable to define raw_atomic_fetch_sub_ << 
952 #endif                                         << 
953 }                                                 450 }
                                                   >> 451 #define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
                                                   >> 452 #endif
954                                                   453 
955 /**                                            !! 454 #ifndef arch_atomic_inc_return_release
956  * raw_atomic_fetch_sub_relaxed() - atomic sub << 
957  * @i: int value to subtract                   << 
958  * @v: pointer to atomic_t                     << 
959  *                                             << 
960  * Atomically updates @v to (@v - @i) with rel << 
961  *                                             << 
962  * Safe to use in noinstr code; prefer atomic_ << 
963  *                                             << 
964  * Return: The original value of @v.           << 
965  */                                            << 
966 static __always_inline int                        455 static __always_inline int
967 raw_atomic_fetch_sub_relaxed(int i, atomic_t * !! 456 arch_atomic_inc_return_release(atomic_t *v)
968 {                                                 457 {
969 #if defined(arch_atomic_fetch_sub_relaxed)     !! 458         return arch_atomic_add_return_release(1, v);
970         return arch_atomic_fetch_sub_relaxed(i !! 459 }
971 #elif defined(arch_atomic_fetch_sub)           !! 460 #define arch_atomic_inc_return_release arch_atomic_inc_return_release
972         return arch_atomic_fetch_sub(i, v);    << 
973 #else                                          << 
974 #error "Unable to define raw_atomic_fetch_sub_ << 
975 #endif                                            461 #endif
                                                   >> 462 
                                                   >> 463 #ifndef arch_atomic_inc_return_relaxed
                                                   >> 464 static __always_inline int
                                                   >> 465 arch_atomic_inc_return_relaxed(atomic_t *v)
                                                   >> 466 {
                                                   >> 467         return arch_atomic_add_return_relaxed(1, v);
976 }                                                 468 }
                                                   >> 469 #define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
                                                   >> 470 #endif
977                                                   471 
978 /**                                            !! 472 #else /* arch_atomic_inc_return_relaxed */
979  * raw_atomic_inc() - atomic increment with re !! 473 
980  * @v: pointer to atomic_t                     !! 474 #ifndef arch_atomic_inc_return_acquire
981  *                                             !! 475 static __always_inline int
982  * Atomically updates @v to (@v + 1) with rela !! 476 arch_atomic_inc_return_acquire(atomic_t *v)
983  *                                             << 
984  * Safe to use in noinstr code; prefer atomic_ << 
985  *                                             << 
986  * Return: Nothing.                            << 
987  */                                            << 
988 static __always_inline void                    << 
989 raw_atomic_inc(atomic_t *v)                    << 
990 {                                                 477 {
991 #if defined(arch_atomic_inc)                   !! 478         int ret = arch_atomic_inc_return_relaxed(v);
992         arch_atomic_inc(v);                    !! 479         __atomic_acquire_fence();
993 #else                                          !! 480         return ret;
994         raw_atomic_add(1, v);                  !! 481 }
                                                   >> 482 #define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
995 #endif                                            483 #endif
                                                   >> 484 
                                                   >> 485 #ifndef arch_atomic_inc_return_release
                                                   >> 486 static __always_inline int
                                                   >> 487 arch_atomic_inc_return_release(atomic_t *v)
                                                   >> 488 {
                                                   >> 489         __atomic_release_fence();
                                                   >> 490         return arch_atomic_inc_return_relaxed(v);
996 }                                                 491 }
                                                   >> 492 #define arch_atomic_inc_return_release arch_atomic_inc_return_release
                                                   >> 493 #endif
997                                                   494 
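raw_atomic_inc() above reduces to raw_atomic_add(1, v) when the architecture provides no dedicated increment; decrement is handled symmetrically further down. The same reduction in plain C11, with invented helper names:

#include <stdatomic.h>

/* Illustrative only: increment/decrement as add/sub of the constant 1. */
static inline void my_inc(atomic_int *v)
{
	atomic_fetch_add_explicit(v, 1, memory_order_relaxed);
}

static inline void my_dec(atomic_int *v)
{
	atomic_fetch_sub_explicit(v, 1, memory_order_relaxed);
}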
998 /**                                            !! 495 #ifndef arch_atomic_inc_return
999  * raw_atomic_inc_return() - atomic increment  << 
1000  * @v: pointer to atomic_t                    << 
1001  *                                            << 
1002  * Atomically updates @v to (@v + 1) with ful << 
1003  *                                            << 
1004  * Safe to use in noinstr code; prefer atomic << 
1005  *                                            << 
1006  * Return: The updated value of @v.           << 
1007  */                                           << 
1008 static __always_inline int                       496 static __always_inline int
1009 raw_atomic_inc_return(atomic_t *v)            !! 497 arch_atomic_inc_return(atomic_t *v)
1010 {                                                498 {
1011 #if defined(arch_atomic_inc_return)           << 
1012         return arch_atomic_inc_return(v);     << 
1013 #elif defined(arch_atomic_inc_return_relaxed) << 
1014         int ret;                                 499         int ret;
1015         __atomic_pre_full_fence();               500         __atomic_pre_full_fence();
1016         ret = arch_atomic_inc_return_relaxed(    501         ret = arch_atomic_inc_return_relaxed(v);
1017         __atomic_post_full_fence();              502         __atomic_post_full_fence();
1018         return ret;                              503         return ret;
1019 #else                                         << 
1020         return raw_atomic_add_return(1, v);   << 
1021 #endif                                        << 
1022 }                                                504 }
                                                   >> 505 #define arch_atomic_inc_return arch_atomic_inc_return
                                                   >> 506 #endif
1023                                                  507 
1024 /**                                           !! 508 #endif /* arch_atomic_inc_return_relaxed */
1025  * raw_atomic_inc_return_acquire() - atomic i !! 509 
1026  * @v: pointer to atomic_t                    !! 510 #ifndef arch_atomic_fetch_inc_relaxed
1027  *                                            !! 511 #ifdef arch_atomic_fetch_inc
1028  * Atomically updates @v to (@v + 1) with acq !! 512 #define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc
1029  *                                            !! 513 #define arch_atomic_fetch_inc_release arch_atomic_fetch_inc
1030  * Safe to use in noinstr code; prefer atomic !! 514 #define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc
1031  *                                            !! 515 #endif /* arch_atomic_fetch_inc */
1032  * Return: The updated value of @v.           !! 516 
1033  */                                           !! 517 #ifndef arch_atomic_fetch_inc
1034 static __always_inline int                       518 static __always_inline int
1035 raw_atomic_inc_return_acquire(atomic_t *v)    !! 519 arch_atomic_fetch_inc(atomic_t *v)
1036 {                                                520 {
1037 #if defined(arch_atomic_inc_return_acquire)   !! 521         return arch_atomic_fetch_add(1, v);
1038         return arch_atomic_inc_return_acquire << 
1039 #elif defined(arch_atomic_inc_return_relaxed) << 
1040         int ret = arch_atomic_inc_return_rela << 
1041         __atomic_acquire_fence();             << 
1042         return ret;                           << 
1043 #elif defined(arch_atomic_inc_return)         << 
1044         return arch_atomic_inc_return(v);     << 
1045 #else                                         << 
1046         return raw_atomic_add_return_acquire( << 
1047 #endif                                        << 
1048 }                                                522 }
                                                   >> 523 #define arch_atomic_fetch_inc arch_atomic_fetch_inc
                                                   >> 524 #endif
1049                                                  525 
1050 /**                                           !! 526 #ifndef arch_atomic_fetch_inc_acquire
1051  * raw_atomic_inc_return_release() - atomic i << 
1052  * @v: pointer to atomic_t                    << 
1053  *                                            << 
1054  * Atomically updates @v to (@v + 1) with rel << 
1055  *                                            << 
1056  * Safe to use in noinstr code; prefer atomic << 
1057  *                                            << 
1058  * Return: The updated value of @v.           << 
1059  */                                           << 
1060 static __always_inline int                       527 static __always_inline int
1061 raw_atomic_inc_return_release(atomic_t *v)    !! 528 arch_atomic_fetch_inc_acquire(atomic_t *v)
1062 {                                                529 {
1063 #if defined(arch_atomic_inc_return_release)   !! 530         return arch_atomic_fetch_add_acquire(1, v);
1064         return arch_atomic_inc_return_release << 
1065 #elif defined(arch_atomic_inc_return_relaxed) << 
1066         __atomic_release_fence();             << 
1067         return arch_atomic_inc_return_relaxed << 
1068 #elif defined(arch_atomic_inc_return)         << 
1069         return arch_atomic_inc_return(v);     << 
1070 #else                                         << 
1071         return raw_atomic_add_return_release( << 
1072 #endif                                        << 
1073 }                                                531 }
                                                   >> 532 #define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
                                                   >> 533 #endif
1074                                                  534 
1075 /**                                           !! 535 #ifndef arch_atomic_fetch_inc_release
1076  * raw_atomic_inc_return_relaxed() - atomic i << 
1077  * @v: pointer to atomic_t                    << 
1078  *                                            << 
1079  * Atomically updates @v to (@v + 1) with rel << 
1080  *                                            << 
1081  * Safe to use in noinstr code; prefer atomic << 
1082  *                                            << 
1083  * Return: The updated value of @v.           << 
1084  */                                           << 
1085 static __always_inline int                       536 static __always_inline int
1086 raw_atomic_inc_return_relaxed(atomic_t *v)    !! 537 arch_atomic_fetch_inc_release(atomic_t *v)
1087 {                                                538 {
1088 #if defined(arch_atomic_inc_return_relaxed)   !! 539         return arch_atomic_fetch_add_release(1, v);
1089         return arch_atomic_inc_return_relaxed << 
1090 #elif defined(arch_atomic_inc_return)         << 
1091         return arch_atomic_inc_return(v);     << 
1092 #else                                         << 
1093         return raw_atomic_add_return_relaxed( << 
1094 #endif                                        << 
1095 }                                                540 }
                                                   >> 541 #define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
                                                   >> 542 #endif
1096                                                  543 
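The inc_return family shows the full 6.12-rc7 preference order: the arch's ordered op, then its relaxed op plus a fence, then the plain op, then a related raw_ helper. A simplified, illustrative macro ladder of that shape (pick_inc_return_acquire is not real generator output):

/* Illustrative only: same selection order as the #if ladders above. */
#if defined(arch_atomic_inc_return_acquire)
#define pick_inc_return_acquire(v)	arch_atomic_inc_return_acquire(v)
#elif defined(arch_atomic_inc_return_relaxed)
#define pick_inc_return_acquire(v)	\
	({ int __r = arch_atomic_inc_return_relaxed(v); __atomic_acquire_fence(); __r; })
#elif defined(arch_atomic_inc_return)
#define pick_inc_return_acquire(v)	arch_atomic_inc_return(v)
#else
#define pick_inc_return_acquire(v)	raw_atomic_add_return_acquire(1, v)
#endif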
1097 /**                                           !! 544 #ifndef arch_atomic_fetch_inc_relaxed
1098  * raw_atomic_fetch_inc() - atomic increment  << 
1099  * @v: pointer to atomic_t                    << 
1100  *                                            << 
1101  * Atomically updates @v to (@v + 1) with ful << 
1102  *                                            << 
1103  * Safe to use in noinstr code; prefer atomic << 
1104  *                                            << 
1105  * Return: The original value of @v.          << 
1106  */                                           << 
1107 static __always_inline int                       545 static __always_inline int
1108 raw_atomic_fetch_inc(atomic_t *v)             !! 546 arch_atomic_fetch_inc_relaxed(atomic_t *v)
1109 {                                                547 {
1110 #if defined(arch_atomic_fetch_inc)            !! 548         return arch_atomic_fetch_add_relaxed(1, v);
1111         return arch_atomic_fetch_inc(v);      << 
1112 #elif defined(arch_atomic_fetch_inc_relaxed)  << 
1113         int ret;                              << 
1114         __atomic_pre_full_fence();            << 
1115         ret = arch_atomic_fetch_inc_relaxed(v << 
1116         __atomic_post_full_fence();           << 
1117         return ret;                           << 
1118 #else                                         << 
1119         return raw_atomic_fetch_add(1, v);    << 
1120 #endif                                        << 
1121 }                                                549 }
                                                   >> 550 #define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed
                                                   >> 551 #endif
1122                                                  552 
1123 /**                                           !! 553 #else /* arch_atomic_fetch_inc_relaxed */
1124  * raw_atomic_fetch_inc_acquire() - atomic in !! 554 
1125  * @v: pointer to atomic_t                    !! 555 #ifndef arch_atomic_fetch_inc_acquire
1126  *                                            << 
1127  * Atomically updates @v to (@v + 1) with acq << 
1128  *                                            << 
1129  * Safe to use in noinstr code; prefer atomic << 
1130  *                                            << 
1131  * Return: The original value of @v.          << 
1132  */                                           << 
1133 static __always_inline int                       556 static __always_inline int
1134 raw_atomic_fetch_inc_acquire(atomic_t *v)     !! 557 arch_atomic_fetch_inc_acquire(atomic_t *v)
1135 {                                                558 {
1136 #if defined(arch_atomic_fetch_inc_acquire)    << 
1137         return arch_atomic_fetch_inc_acquire( << 
1138 #elif defined(arch_atomic_fetch_inc_relaxed)  << 
1139         int ret = arch_atomic_fetch_inc_relax    559         int ret = arch_atomic_fetch_inc_relaxed(v);
1140         __atomic_acquire_fence();                560         __atomic_acquire_fence();
1141         return ret;                              561         return ret;
1142 #elif defined(arch_atomic_fetch_inc)          << 
1143         return arch_atomic_fetch_inc(v);      << 
1144 #else                                         << 
1145         return raw_atomic_fetch_add_acquire(1 << 
1146 #endif                                        << 
1147 }                                                562 }
                                                   >> 563 #define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
                                                   >> 564 #endif
1148                                                  565 
1149 /**                                           !! 566 #ifndef arch_atomic_fetch_inc_release
1150  * raw_atomic_fetch_inc_release() - atomic in << 
1151  * @v: pointer to atomic_t                    << 
1152  *                                            << 
1153  * Atomically updates @v to (@v + 1) with rel << 
1154  *                                            << 
1155  * Safe to use in noinstr code; prefer atomic << 
1156  *                                            << 
1157  * Return: The original value of @v.          << 
1158  */                                           << 
1159 static __always_inline int                       567 static __always_inline int
1160 raw_atomic_fetch_inc_release(atomic_t *v)     !! 568 arch_atomic_fetch_inc_release(atomic_t *v)
1161 {                                                569 {
1162 #if defined(arch_atomic_fetch_inc_release)    << 
1163         return arch_atomic_fetch_inc_release( << 
1164 #elif defined(arch_atomic_fetch_inc_relaxed)  << 
1165         __atomic_release_fence();                570         __atomic_release_fence();
1166         return arch_atomic_fetch_inc_relaxed(    571         return arch_atomic_fetch_inc_relaxed(v);
1167 #elif defined(arch_atomic_fetch_inc)          << 
1168         return arch_atomic_fetch_inc(v);      << 
1169 #else                                         << 
1170         return raw_atomic_fetch_add_release(1 << 
1171 #endif                                        << 
1172 }                                                572 }
                                                   >> 573 #define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
                                                   >> 574 #endif
1173                                                  575 
1174 /**                                           !! 576 #ifndef arch_atomic_fetch_inc
1175  * raw_atomic_fetch_inc_relaxed() - atomic in << 
1176  * @v: pointer to atomic_t                    << 
1177  *                                            << 
1178  * Atomically updates @v to (@v + 1) with rel << 
1179  *                                            << 
1180  * Safe to use in noinstr code; prefer atomic << 
1181  *                                            << 
1182  * Return: The original value of @v.          << 
1183  */                                           << 
1184 static __always_inline int                       577 static __always_inline int
1185 raw_atomic_fetch_inc_relaxed(atomic_t *v)     !! 578 arch_atomic_fetch_inc(atomic_t *v)
1186 {                                                579 {
1187 #if defined(arch_atomic_fetch_inc_relaxed)    !! 580         int ret;
1188         return arch_atomic_fetch_inc_relaxed( !! 581         __atomic_pre_full_fence();
1189 #elif defined(arch_atomic_fetch_inc)          !! 582         ret = arch_atomic_fetch_inc_relaxed(v);
1190         return arch_atomic_fetch_inc(v);      !! 583         __atomic_post_full_fence();
1191 #else                                         !! 584         return ret;
1192         return raw_atomic_fetch_add_relaxed(1 << 
1193 #endif                                        << 
1194 }                                                585 }
                                                   >> 586 #define arch_atomic_fetch_inc arch_atomic_fetch_inc
                                                   >> 587 #endif
1195                                                  588 
1196 /**                                           !! 589 #endif /* arch_atomic_fetch_inc_relaxed */
1197  * raw_atomic_dec() - atomic decrement with r !! 590 
1198  * @v: pointer to atomic_t                    !! 591 #ifndef arch_atomic_dec
1199  *                                            << 
1200  * Atomically updates @v to (@v - 1) with rel << 
1201  *                                            << 
1202  * Safe to use in noinstr code; prefer atomic << 
1203  *                                            << 
1204  * Return: Nothing.                           << 
1205  */                                           << 
1206 static __always_inline void                      592 static __always_inline void
1207 raw_atomic_dec(atomic_t *v)                   !! 593 arch_atomic_dec(atomic_t *v)
1208 {                                                594 {
1209 #if defined(arch_atomic_dec)                  !! 595         arch_atomic_sub(1, v);
1210         arch_atomic_dec(v);                   !! 596 }
1211 #else                                         !! 597 #define arch_atomic_dec arch_atomic_dec
1212         raw_atomic_sub(1, v);                 << 
1213 #endif                                           598 #endif
                                                   >> 599 
                                                   >> 600 #ifndef arch_atomic_dec_return_relaxed
                                                   >> 601 #ifdef arch_atomic_dec_return
                                                   >> 602 #define arch_atomic_dec_return_acquire arch_atomic_dec_return
                                                   >> 603 #define arch_atomic_dec_return_release arch_atomic_dec_return
                                                   >> 604 #define arch_atomic_dec_return_relaxed arch_atomic_dec_return
                                                   >> 605 #endif /* arch_atomic_dec_return */
                                                   >> 606 
                                                   >> 607 #ifndef arch_atomic_dec_return
                                                   >> 608 static __always_inline int
                                                   >> 609 arch_atomic_dec_return(atomic_t *v)
                                                   >> 610 {
                                                   >> 611         return arch_atomic_sub_return(1, v);
1214 }                                                612 }
                                                   >> 613 #define arch_atomic_dec_return arch_atomic_dec_return
                                                   >> 614 #endif
1215                                                  615 
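As a hedged usage illustration only (struct obj and the helpers are invented, and this is C11 userspace code rather than kernel code), the increment/decrement forms with explicit ordering are what a simple reference count needs:

#include <stdatomic.h>
#include <stdbool.h>

/* Toy refcount: the drop uses release ordering so prior writes are visible
 * to whoever frees the object, with an acquire fence once the count hits 0. */
struct obj { atomic_int refs; };

static inline void obj_get(struct obj *o)
{
	atomic_fetch_add_explicit(&o->refs, 1, memory_order_relaxed);
}

static inline bool obj_put(struct obj *o)
{
	if (atomic_fetch_sub_explicit(&o->refs, 1, memory_order_release) == 1) {
		atomic_thread_fence(memory_order_acquire);
		return true;	/* last reference dropped; caller may free */
	}
	return false;
}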
1216 /**                                           !! 616 #ifndef arch_atomic_dec_return_acquire
1217  * raw_atomic_dec_return() - atomic decrement << 
1218  * @v: pointer to atomic_t                    << 
1219  *                                            << 
1220  * Atomically updates @v to (@v - 1) with ful << 
1221  *                                            << 
1222  * Safe to use in noinstr code; prefer atomic << 
1223  *                                            << 
1224  * Return: The updated value of @v.           << 
1225  */                                           << 
1226 static __always_inline int                       617 static __always_inline int
1227 raw_atomic_dec_return(atomic_t *v)            !! 618 arch_atomic_dec_return_acquire(atomic_t *v)
1228 {                                                619 {
1229 #if defined(arch_atomic_dec_return)           !! 620         return arch_atomic_sub_return_acquire(1, v);
1230         return arch_atomic_dec_return(v);     !! 621 }
1231 #elif defined(arch_atomic_dec_return_relaxed) !! 622 #define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
1232         int ret;                              << 
1233         __atomic_pre_full_fence();            << 
1234         ret = arch_atomic_dec_return_relaxed( << 
1235         __atomic_post_full_fence();           << 
1236         return ret;                           << 
1237 #else                                         << 
1238         return raw_atomic_sub_return(1, v);   << 
1239 #endif                                           623 #endif
                                                   >> 624 
                                                   >> 625 #ifndef arch_atomic_dec_return_release
                                                   >> 626 static __always_inline int
                                                   >> 627 arch_atomic_dec_return_release(atomic_t *v)
                                                   >> 628 {
                                                   >> 629         return arch_atomic_sub_return_release(1, v);
1240 }                                                630 }
                                                   >> 631 #define arch_atomic_dec_return_release arch_atomic_dec_return_release
                                                   >> 632 #endif
1241                                                  633 
1242 /**                                           !! 634 #ifndef arch_atomic_dec_return_relaxed
1243  * raw_atomic_dec_return_acquire() - atomic d !! 635 static __always_inline int
1244  * @v: pointer to atomic_t                    !! 636 arch_atomic_dec_return_relaxed(atomic_t *v)
1245  *                                            !! 637 {
1246  * Atomically updates @v to (@v - 1) with acq !! 638         return arch_atomic_sub_return_relaxed(1, v);
1247  *                                            !! 639 }
1248  * Safe to use in noinstr code; prefer atomic !! 640 #define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
1249  *                                            !! 641 #endif
1250  * Return: The updated value of @v.           !! 642 
1251  */                                           !! 643 #else /* arch_atomic_dec_return_relaxed */
                                                   >> 644 
                                                   >> 645 #ifndef arch_atomic_dec_return_acquire
1252 static __always_inline int                       646 static __always_inline int
1253 raw_atomic_dec_return_acquire(atomic_t *v)    !! 647 arch_atomic_dec_return_acquire(atomic_t *v)
1254 {                                                648 {
1255 #if defined(arch_atomic_dec_return_acquire)   << 
1256         return arch_atomic_dec_return_acquire << 
1257 #elif defined(arch_atomic_dec_return_relaxed) << 
1258         int ret = arch_atomic_dec_return_rela    649         int ret = arch_atomic_dec_return_relaxed(v);
1259         __atomic_acquire_fence();                650         __atomic_acquire_fence();
1260         return ret;                              651         return ret;
1261 #elif defined(arch_atomic_dec_return)         << 
1262         return arch_atomic_dec_return(v);     << 
1263 #else                                         << 
1264         return raw_atomic_sub_return_acquire( << 
1265 #endif                                        << 
1266 }                                                652 }
                                                   >> 653 #define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
                                                   >> 654 #endif
1267                                                  655 
1268 /**                                           !! 656 #ifndef arch_atomic_dec_return_release
1269  * raw_atomic_dec_return_release() - atomic d << 
1270  * @v: pointer to atomic_t                    << 
1271  *                                            << 
1272  * Atomically updates @v to (@v - 1) with rel << 
1273  *                                            << 
1274  * Safe to use in noinstr code; prefer atomic << 
1275  *                                            << 
1276  * Return: The updated value of @v.           << 
1277  */                                           << 
1278 static __always_inline int                       657 static __always_inline int
1279 raw_atomic_dec_return_release(atomic_t *v)    !! 658 arch_atomic_dec_return_release(atomic_t *v)
1280 {                                                659 {
1281 #if defined(arch_atomic_dec_return_release)   << 
1282         return arch_atomic_dec_return_release << 
1283 #elif defined(arch_atomic_dec_return_relaxed) << 
1284         __atomic_release_fence();                660         __atomic_release_fence();
1285         return arch_atomic_dec_return_relaxed    661         return arch_atomic_dec_return_relaxed(v);
1286 #elif defined(arch_atomic_dec_return)         << 
1287         return arch_atomic_dec_return(v);     << 
1288 #else                                         << 
1289         return raw_atomic_sub_return_release( << 
1290 #endif                                        << 
1291 }                                                662 }
1292                                               !! 663 #define arch_atomic_dec_return_release arch_atomic_dec_return_release
1293 /**                                           << 
1294  * raw_atomic_dec_return_relaxed() - atomic d << 
1295  * @v: pointer to atomic_t                    << 
1296  *                                            << 
1297  * Atomically updates @v to (@v - 1) with rel << 
1298  *                                            << 
1299  * Safe to use in noinstr code; prefer atomic << 
1300  *                                            << 
1301  * Return: The updated value of @v.           << 
1302  */                                           << 
1303 static __always_inline int                    << 
1304 raw_atomic_dec_return_relaxed(atomic_t *v)    << 
1305 {                                             << 
1306 #if defined(arch_atomic_dec_return_relaxed)   << 
1307         return arch_atomic_dec_return_relaxed << 
1308 #elif defined(arch_atomic_dec_return)         << 
1309         return arch_atomic_dec_return(v);     << 
1310 #else                                         << 
1311         return raw_atomic_sub_return_relaxed( << 
1312 #endif                                           664 #endif
1313 }                                             << 
1314                                                  665 
1315 /**                                           !! 666 #ifndef arch_atomic_dec_return
1316  * raw_atomic_fetch_dec() - atomic decrement  << 
1317  * @v: pointer to atomic_t                    << 
1318  *                                            << 
1319  * Atomically updates @v to (@v - 1) with ful << 
1320  *                                            << 
1321  * Safe to use in noinstr code; prefer atomic << 
1322  *                                            << 
1323  * Return: The original value of @v.          << 
1324  */                                           << 
1325 static __always_inline int                       667 static __always_inline int
1326 raw_atomic_fetch_dec(atomic_t *v)             !! 668 arch_atomic_dec_return(atomic_t *v)
1327 {                                                669 {
1328 #if defined(arch_atomic_fetch_dec)            << 
1329         return arch_atomic_fetch_dec(v);      << 
1330 #elif defined(arch_atomic_fetch_dec_relaxed)  << 
1331         int ret;                                 670         int ret;
1332         __atomic_pre_full_fence();               671         __atomic_pre_full_fence();
1333         ret = arch_atomic_fetch_dec_relaxed(v !! 672         ret = arch_atomic_dec_return_relaxed(v);
1334         __atomic_post_full_fence();              673         __atomic_post_full_fence();
1335         return ret;                              674         return ret;
1336 #else                                         << 
1337         return raw_atomic_fetch_sub(1, v);    << 
1338 #endif                                        << 
1339 }                                                675 }
                                                   >> 676 #define arch_atomic_dec_return arch_atomic_dec_return
                                                   >> 677 #endif
1340                                                  678 
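Note the naming convention carried through this file: fetch_* operations return the value the counter held before the update, while *_return operations return the updated value. A small standalone C11 check of the distinction (illustrative only, since C11 atomics expose only the fetch_* flavour directly):

#include <assert.h>
#include <stdatomic.h>

int main(void)
{
	atomic_int v = 5;
	int old = atomic_fetch_sub_explicit(&v, 1, memory_order_relaxed);

	assert(old == 5);		/* fetch_* : original value */
	assert(atomic_load(&v) == 4);	/* a *_return form would yield 4 */
	return 0;
}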
1341 /**                                           !! 679 #endif /* arch_atomic_dec_return_relaxed */
1342  * raw_atomic_fetch_dec_acquire() - atomic de !! 680 
1343  * @v: pointer to atomic_t                    !! 681 #ifndef arch_atomic_fetch_dec_relaxed
1344  *                                            !! 682 #ifdef arch_atomic_fetch_dec
1345  * Atomically updates @v to (@v - 1) with acq !! 683 #define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec
1346  *                                            !! 684 #define arch_atomic_fetch_dec_release arch_atomic_fetch_dec
1347  * Safe to use in noinstr code; prefer atomic !! 685 #define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec
1348  *                                            !! 686 #endif /* arch_atomic_fetch_dec */
1349  * Return: The original value of @v.          !! 687 
1350  */                                           !! 688 #ifndef arch_atomic_fetch_dec
1351 static __always_inline int                       689 static __always_inline int
1352 raw_atomic_fetch_dec_acquire(atomic_t *v)     !! 690 arch_atomic_fetch_dec(atomic_t *v)
1353 {                                                691 {
1354 #if defined(arch_atomic_fetch_dec_acquire)    !! 692         return arch_atomic_fetch_sub(1, v);
1355         return arch_atomic_fetch_dec_acquire( << 
1356 #elif defined(arch_atomic_fetch_dec_relaxed)  << 
1357         int ret = arch_atomic_fetch_dec_relax << 
1358         __atomic_acquire_fence();             << 
1359         return ret;                           << 
1360 #elif defined(arch_atomic_fetch_dec)          << 
1361         return arch_atomic_fetch_dec(v);      << 
1362 #else                                         << 
1363         return raw_atomic_fetch_sub_acquire(1 << 
1364 #endif                                        << 
1365 }                                                693 }
                                                   >> 694 #define arch_atomic_fetch_dec arch_atomic_fetch_dec
                                                   >> 695 #endif
1366                                                  696 
1367 /**                                           !! 697 #ifndef arch_atomic_fetch_dec_acquire
1368  * raw_atomic_fetch_dec_release() - atomic de << 
1369  * @v: pointer to atomic_t                    << 
1370  *                                            << 
1371  * Atomically updates @v to (@v - 1) with rel << 
1372  *                                            << 
1373  * Safe to use in noinstr code; prefer atomic << 
1374  *                                            << 
1375  * Return: The original value of @v.          << 
1376  */                                           << 
1377 static __always_inline int                       698 static __always_inline int
1378 raw_atomic_fetch_dec_release(atomic_t *v)     !! 699 arch_atomic_fetch_dec_acquire(atomic_t *v)
1379 {                                                700 {
1380 #if defined(arch_atomic_fetch_dec_release)    !! 701         return arch_atomic_fetch_sub_acquire(1, v);
1381         return arch_atomic_fetch_dec_release( !! 702 }
1382 #elif defined(arch_atomic_fetch_dec_relaxed)  !! 703 #define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
1383         __atomic_release_fence();             << 
1384         return arch_atomic_fetch_dec_relaxed( << 
1385 #elif defined(arch_atomic_fetch_dec)          << 
1386         return arch_atomic_fetch_dec(v);      << 
1387 #else                                         << 
1388         return raw_atomic_fetch_sub_release(1 << 
1389 #endif                                           704 #endif
                                                   >> 705 
                                                   >> 706 #ifndef arch_atomic_fetch_dec_release
                                                   >> 707 static __always_inline int
                                                   >> 708 arch_atomic_fetch_dec_release(atomic_t *v)
                                                   >> 709 {
                                                   >> 710         return arch_atomic_fetch_sub_release(1, v);
1390 }                                                711 }
                                                   >> 712 #define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
                                                   >> 713 #endif
1391                                                  714 
1392 /**                                           !! 715 #ifndef arch_atomic_fetch_dec_relaxed
1393  * raw_atomic_fetch_dec_relaxed() - atomic de << 
1394  * @v: pointer to atomic_t                    << 
1395  *                                            << 
1396  * Atomically updates @v to (@v - 1) with rel << 
1397  *                                            << 
1398  * Safe to use in noinstr code; prefer atomic << 
1399  *                                            << 
1400  * Return: The original value of @v.          << 
1401  */                                           << 
1402 static __always_inline int                       716 static __always_inline int
1403 raw_atomic_fetch_dec_relaxed(atomic_t *v)     !! 717 arch_atomic_fetch_dec_relaxed(atomic_t *v)
1404 {                                                718 {
1405 #if defined(arch_atomic_fetch_dec_relaxed)    !! 719         return arch_atomic_fetch_sub_relaxed(1, v);
1406         return arch_atomic_fetch_dec_relaxed( !! 720 }
1407 #elif defined(arch_atomic_fetch_dec)          !! 721 #define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed
1408         return arch_atomic_fetch_dec(v);      << 
1409 #else                                         << 
1410         return raw_atomic_fetch_sub_relaxed(1 << 
1411 #endif                                           722 #endif
                                                   >> 723 
                                                   >> 724 #else /* arch_atomic_fetch_dec_relaxed */
                                                   >> 725 
                                                   >> 726 #ifndef arch_atomic_fetch_dec_acquire
                                                   >> 727 static __always_inline int
                                                   >> 728 arch_atomic_fetch_dec_acquire(atomic_t *v)
                                                   >> 729 {
                                                   >> 730         int ret = arch_atomic_fetch_dec_relaxed(v);
                                                   >> 731         __atomic_acquire_fence();
                                                   >> 732         return ret;
1412 }                                                733 }
                                                   >> 734 #define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
                                                   >> 735 #endif
1413                                                  736 
1414 /**                                           !! 737 #ifndef arch_atomic_fetch_dec_release
1415  * raw_atomic_and() - atomic bitwise AND with !! 738 static __always_inline int
1416  * @i: int value                              !! 739 arch_atomic_fetch_dec_release(atomic_t *v)
1417  * @v: pointer to atomic_t                    << 
1418  *                                            << 
1419  * Atomically updates @v to (@v & @i) with re << 
1420  *                                            << 
1421  * Safe to use in noinstr code; prefer atomic << 
1422  *                                            << 
1423  * Return: Nothing.                           << 
1424  */                                           << 
1425 static __always_inline void                   << 
1426 raw_atomic_and(int i, atomic_t *v)            << 
1427 {                                                740 {
1428         arch_atomic_and(i, v);                !! 741         __atomic_release_fence();
                                                   >> 742         return arch_atomic_fetch_dec_relaxed(v);
1429 }                                                743 }
                                                   >> 744 #define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
                                                   >> 745 #endif
1430                                                  746 
1431 /**                                           !! 747 #ifndef arch_atomic_fetch_dec
1432  * raw_atomic_fetch_and() - atomic bitwise AN << 
1433  * @i: int value                              << 
1434  * @v: pointer to atomic_t                    << 
1435  *                                            << 
1436  * Atomically updates @v to (@v & @i) with fu << 
1437  *                                            << 
1438  * Safe to use in noinstr code; prefer atomic << 
1439  *                                            << 
1440  * Return: The original value of @v.          << 
1441  */                                           << 
1442 static __always_inline int                       748 static __always_inline int
1443 raw_atomic_fetch_and(int i, atomic_t *v)      !! 749 arch_atomic_fetch_dec(atomic_t *v)
1444 {                                                750 {
1445 #if defined(arch_atomic_fetch_and)            << 
1446         return arch_atomic_fetch_and(i, v);   << 
1447 #elif defined(arch_atomic_fetch_and_relaxed)  << 
1448         int ret;                                 751         int ret;
1449         __atomic_pre_full_fence();               752         __atomic_pre_full_fence();
1450         ret = arch_atomic_fetch_and_relaxed(i !! 753         ret = arch_atomic_fetch_dec_relaxed(v);
1451         __atomic_post_full_fence();              754         __atomic_post_full_fence();
1452         return ret;                              755         return ret;
1453 #else                                         << 
1454 #error "Unable to define raw_atomic_fetch_and"      << 
1455 #endif                                        << 
1456 }                                                756 }
                                                   >> 757 #define arch_atomic_fetch_dec arch_atomic_fetch_dec
                                                   >> 758 #endif
1457                                                  759 
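Where only arch_atomic_fetch_and_relaxed() is provided, the fully ordered fallback above brackets the relaxed op with __atomic_pre_full_fence()/__atomic_post_full_fence(). As a rough userspace analogue of that shape (a sketch only, using C11 <stdatomic.h>, with seq_cst fences standing in for the kernel's full-barrier macros):

#include <stdatomic.h>
#include <stdio.h>

/* Fully ordered fetch_and composed from a relaxed RMW between two fences. */
int fetch_and_full(atomic_int *v, int i)
{
        int ret;

        atomic_thread_fence(memory_order_seq_cst);      /* "pre" full fence */
        ret = atomic_fetch_and_explicit(v, i, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* "post" full fence */
        return ret;
}

int main(void)
{
        atomic_int v = 0xff;
        int old = fetch_and_full(&v, 0x0f);

        printf("old=%#x new=%#x\n", (unsigned)old, (unsigned)atomic_load(&v));
        return 0;
}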
1458 /**                                           !! 760 #endif /* arch_atomic_fetch_dec_relaxed */
1459  * raw_atomic_fetch_and_acquire() - atomic bi !! 761 
1460  * @i: int value                              !! 762 #ifndef arch_atomic_fetch_and_relaxed
1461  * @v: pointer to atomic_t                    !! 763 #define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
1462  *                                            !! 764 #define arch_atomic_fetch_and_release arch_atomic_fetch_and
1463  * Atomically updates @v to (@v & @i) with ac !! 765 #define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and
1464  *                                            !! 766 #else /* arch_atomic_fetch_and_relaxed */
1465  * Safe to use in noinstr code; prefer atomic !! 767 
1466  *                                            !! 768 #ifndef arch_atomic_fetch_and_acquire
1467  * Return: The original value of @v.          << 
1468  */                                           << 
1469 static __always_inline int                       769 static __always_inline int
1470 raw_atomic_fetch_and_acquire(int i, atomic_t  !! 770 arch_atomic_fetch_and_acquire(int i, atomic_t *v)
1471 {                                                771 {
1472 #if defined(arch_atomic_fetch_and_acquire)    << 
1473         return arch_atomic_fetch_and_acquire( << 
1474 #elif defined(arch_atomic_fetch_and_relaxed)  << 
1475         int ret = arch_atomic_fetch_and_relax    772         int ret = arch_atomic_fetch_and_relaxed(i, v);
1476         __atomic_acquire_fence();                773         __atomic_acquire_fence();
1477         return ret;                              774         return ret;
1478 #elif defined(arch_atomic_fetch_and)          << 
1479         return arch_atomic_fetch_and(i, v);   << 
1480 #else                                         << 
1481 #error "Unable to define raw_atomic_fetch_and_acquire"      << 
1482 #endif                                        << 
1483 }                                                775 }
                                                   >> 776 #define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
                                                   >> 777 #endif
1484                                                  778 
1485 /**                                           !! 779 #ifndef arch_atomic_fetch_and_release
1486  * raw_atomic_fetch_and_release() - atomic bi << 
1487  * @i: int value                              << 
1488  * @v: pointer to atomic_t                    << 
1489  *                                            << 
1490  * Atomically updates @v to (@v & @i) with re << 
1491  *                                            << 
1492  * Safe to use in noinstr code; prefer atomic << 
1493  *                                            << 
1494  * Return: The original value of @v.          << 
1495  */                                           << 
1496 static __always_inline int                       780 static __always_inline int
1497 raw_atomic_fetch_and_release(int i, atomic_t  !! 781 arch_atomic_fetch_and_release(int i, atomic_t *v)
1498 {                                                782 {
1499 #if defined(arch_atomic_fetch_and_release)    << 
1500         return arch_atomic_fetch_and_release( << 
1501 #elif defined(arch_atomic_fetch_and_relaxed)  << 
1502         __atomic_release_fence();                783         __atomic_release_fence();
1503         return arch_atomic_fetch_and_relaxed(    784         return arch_atomic_fetch_and_relaxed(i, v);
1504 #elif defined(arch_atomic_fetch_and)          << 
1505         return arch_atomic_fetch_and(i, v);   << 
1506 #else                                         << 
1507 #error "Unable to define raw_atomic_fetch_and_release"      << 
1508 #endif                                        << 
1509 }                                                785 }
                                                   >> 786 #define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
                                                   >> 787 #endif
1510                                                  788 
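The acquire and release fallbacks follow the same recipe with one-sided fences: the acquire form runs the relaxed op and then an acquire fence, while the release form issues a release fence before the relaxed op. A minimal C11 analogue of that composition (illustrative sketch, not the kernel implementation):

#include <stdatomic.h>

/* Acquire: relaxed RMW, then an acquire fence (fence after the op). */
int fetch_and_acquire_like(atomic_int *v, int i)
{
        int ret = atomic_fetch_and_explicit(v, i, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);
        return ret;
}

/* Release: release fence first, then the relaxed RMW (fence before the op). */
int fetch_and_release_like(atomic_int *v, int i)
{
        atomic_thread_fence(memory_order_release);
        return atomic_fetch_and_explicit(v, i, memory_order_relaxed);
}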
1511 /**                                           !! 789 #ifndef arch_atomic_fetch_and
1512  * raw_atomic_fetch_and_relaxed() - atomic bi << 
1513  * @i: int value                              << 
1514  * @v: pointer to atomic_t                    << 
1515  *                                            << 
1516  * Atomically updates @v to (@v & @i) with re << 
1517  *                                            << 
1518  * Safe to use in noinstr code; prefer atomic << 
1519  *                                            << 
1520  * Return: The original value of @v.          << 
1521  */                                           << 
1522 static __always_inline int                       790 static __always_inline int
1523 raw_atomic_fetch_and_relaxed(int i, atomic_t  !! 791 arch_atomic_fetch_and(int i, atomic_t *v)
1524 {                                                792 {
1525 #if defined(arch_atomic_fetch_and_relaxed)    !! 793         int ret;
1526         return arch_atomic_fetch_and_relaxed( !! 794         __atomic_pre_full_fence();
1527 #elif defined(arch_atomic_fetch_and)          !! 795         ret = arch_atomic_fetch_and_relaxed(i, v);
1528         return arch_atomic_fetch_and(i, v);   !! 796         __atomic_post_full_fence();
1529 #else                                         !! 797         return ret;
1530 #error "Unable to define raw_atomic_fetch_and_relaxed"      << 
1531 #endif                                        << 
1532 }                                                798 }
                                                   >> 799 #define arch_atomic_fetch_and arch_atomic_fetch_and
                                                   >> 800 #endif
1533                                                  801 
1534 /**                                           !! 802 #endif /* arch_atomic_fetch_and_relaxed */
1535  * raw_atomic_andnot() - atomic bitwise AND N !! 803 
1536  * @i: int value                              !! 804 #ifndef arch_atomic_andnot
1537  * @v: pointer to atomic_t                    << 
1538  *                                            << 
1539  * Atomically updates @v to (@v & ~@i) with r << 
1540  *                                            << 
1541  * Safe to use in noinstr code; prefer atomic << 
1542  *                                            << 
1543  * Return: Nothing.                           << 
1544  */                                           << 
1545 static __always_inline void                      805 static __always_inline void
1546 raw_atomic_andnot(int i, atomic_t *v)         !! 806 arch_atomic_andnot(int i, atomic_t *v)
1547 {                                                807 {
1548 #if defined(arch_atomic_andnot)               !! 808         arch_atomic_and(~i, v);
1549         arch_atomic_andnot(i, v);             << 
1550 #else                                         << 
1551         raw_atomic_and(~i, v);                << 
1552 #endif                                        << 
1553 }                                                809 }
                                                   >> 810 #define arch_atomic_andnot arch_atomic_andnot
                                                   >> 811 #endif
1554                                                  812 
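raw_atomic_andnot(i, v) clears the bits set in @i; when an architecture has no dedicated andnot op, the fallback above simply reuses the AND op with the complemented mask. A small userspace sketch of the same idea (C11 atomics, with a hypothetical FLAG_BUSY bit chosen purely for illustration):

#include <stdatomic.h>

#define FLAG_BUSY 0x1   /* hypothetical flag bit for illustration */

/* Clear FLAG_BUSY: "andnot" expressed as AND with the complemented mask. */
void clear_busy(atomic_uint *flags)
{
        atomic_fetch_and_explicit(flags, ~FLAG_BUSY, memory_order_relaxed);
}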
1555 /**                                           !! 813 #ifndef arch_atomic_fetch_andnot_relaxed
1556  * raw_atomic_fetch_andnot() - atomic bitwise !! 814 #ifdef arch_atomic_fetch_andnot
1557  * @i: int value                              !! 815 #define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
1558  * @v: pointer to atomic_t                    !! 816 #define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
1559  *                                            !! 817 #define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
1560  * Atomically updates @v to (@v & ~@i) with f !! 818 #endif /* arch_atomic_fetch_andnot */
1561  *                                            !! 819 
1562  * Safe to use in noinstr code; prefer atomic !! 820 #ifndef arch_atomic_fetch_andnot
1563  *                                            << 
1564  * Return: The original value of @v.          << 
1565  */                                           << 
1566 static __always_inline int                       821 static __always_inline int
1567 raw_atomic_fetch_andnot(int i, atomic_t *v)   !! 822 arch_atomic_fetch_andnot(int i, atomic_t *v)
1568 {                                                823 {
1569 #if defined(arch_atomic_fetch_andnot)         !! 824         return arch_atomic_fetch_and(~i, v);
1570         return arch_atomic_fetch_andnot(i, v) << 
1571 #elif defined(arch_atomic_fetch_andnot_relaxe << 
1572         int ret;                              << 
1573         __atomic_pre_full_fence();            << 
1574         ret = arch_atomic_fetch_andnot_relaxe << 
1575         __atomic_post_full_fence();           << 
1576         return ret;                           << 
1577 #else                                         << 
1578         return raw_atomic_fetch_and(~i, v);   << 
1579 #endif                                        << 
1580 }                                                825 }
                                                   >> 826 #define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
                                                   >> 827 #endif
1581                                                  828 
1582 /**                                           !! 829 #ifndef arch_atomic_fetch_andnot_acquire
1583  * raw_atomic_fetch_andnot_acquire() - atomic << 
1584  * @i: int value                              << 
1585  * @v: pointer to atomic_t                    << 
1586  *                                            << 
1587  * Atomically updates @v to (@v & ~@i) with a << 
1588  *                                            << 
1589  * Safe to use in noinstr code; prefer atomic << 
1590  *                                            << 
1591  * Return: The original value of @v.          << 
1592  */                                           << 
1593 static __always_inline int                       830 static __always_inline int
1594 raw_atomic_fetch_andnot_acquire(int i, atomic !! 831 arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
1595 {                                                832 {
1596 #if defined(arch_atomic_fetch_andnot_acquire) !! 833         return arch_atomic_fetch_and_acquire(~i, v);
1597         return arch_atomic_fetch_andnot_acqui << 
1598 #elif defined(arch_atomic_fetch_andnot_relaxe << 
1599         int ret = arch_atomic_fetch_andnot_re << 
1600         __atomic_acquire_fence();             << 
1601         return ret;                           << 
1602 #elif defined(arch_atomic_fetch_andnot)       << 
1603         return arch_atomic_fetch_andnot(i, v) << 
1604 #else                                         << 
1605         return raw_atomic_fetch_and_acquire(~ << 
1606 #endif                                        << 
1607 }                                                834 }
                                                   >> 835 #define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
                                                   >> 836 #endif
1608                                                  837 
1609 /**                                           !! 838 #ifndef arch_atomic_fetch_andnot_release
1610  * raw_atomic_fetch_andnot_release() - atomic << 
1611  * @i: int value                              << 
1612  * @v: pointer to atomic_t                    << 
1613  *                                            << 
1614  * Atomically updates @v to (@v & ~@i) with r << 
1615  *                                            << 
1616  * Safe to use in noinstr code; prefer atomic << 
1617  *                                            << 
1618  * Return: The original value of @v.          << 
1619  */                                           << 
1620 static __always_inline int                       839 static __always_inline int
1621 raw_atomic_fetch_andnot_release(int i, atomic !! 840 arch_atomic_fetch_andnot_release(int i, atomic_t *v)
1622 {                                                841 {
1623 #if defined(arch_atomic_fetch_andnot_release) !! 842         return arch_atomic_fetch_and_release(~i, v);
1624         return arch_atomic_fetch_andnot_relea << 
1625 #elif defined(arch_atomic_fetch_andnot_relaxe << 
1626         __atomic_release_fence();             << 
1627         return arch_atomic_fetch_andnot_relax << 
1628 #elif defined(arch_atomic_fetch_andnot)       << 
1629         return arch_atomic_fetch_andnot(i, v) << 
1630 #else                                         << 
1631         return raw_atomic_fetch_and_release(~ << 
1632 #endif                                        << 
1633 }                                                843 }
                                                   >> 844 #define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
                                                   >> 845 #endif
1634                                                  846 
1635 /**                                           !! 847 #ifndef arch_atomic_fetch_andnot_relaxed
1636  * raw_atomic_fetch_andnot_relaxed() - atomic << 
1637  * @i: int value                              << 
1638  * @v: pointer to atomic_t                    << 
1639  *                                            << 
1640  * Atomically updates @v to (@v & ~@i) with r << 
1641  *                                            << 
1642  * Safe to use in noinstr code; prefer atomic << 
1643  *                                            << 
1644  * Return: The original value of @v.          << 
1645  */                                           << 
1646 static __always_inline int                       848 static __always_inline int
1647 raw_atomic_fetch_andnot_relaxed(int i, atomic !! 849 arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
1648 {                                                850 {
1649 #if defined(arch_atomic_fetch_andnot_relaxed) !! 851         return arch_atomic_fetch_and_relaxed(~i, v);
1650         return arch_atomic_fetch_andnot_relax !! 852 }
1651 #elif defined(arch_atomic_fetch_andnot)       !! 853 #define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
1652         return arch_atomic_fetch_andnot(i, v) << 
1653 #else                                         << 
1654         return raw_atomic_fetch_and_relaxed(~ << 
1655 #endif                                           854 #endif
                                                   >> 855 
                                                   >> 856 #else /* arch_atomic_fetch_andnot_relaxed */
                                                   >> 857 
                                                   >> 858 #ifndef arch_atomic_fetch_andnot_acquire
                                                   >> 859 static __always_inline int
                                                   >> 860 arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
                                                   >> 861 {
                                                   >> 862         int ret = arch_atomic_fetch_andnot_relaxed(i, v);
                                                   >> 863         __atomic_acquire_fence();
                                                   >> 864         return ret;
1656 }                                                865 }
                                                   >> 866 #define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
                                                   >> 867 #endif
1657                                                  868 
1658 /**                                           !! 869 #ifndef arch_atomic_fetch_andnot_release
1659  * raw_atomic_or() - atomic bitwise OR with r !! 870 static __always_inline int
1660  * @i: int value                              !! 871 arch_atomic_fetch_andnot_release(int i, atomic_t *v)
1661  * @v: pointer to atomic_t                    << 
1662  *                                            << 
1663  * Atomically updates @v to (@v | @i) with re << 
1664  *                                            << 
1665  * Safe to use in noinstr code; prefer atomic << 
1666  *                                            << 
1667  * Return: Nothing.                           << 
1668  */                                           << 
1669 static __always_inline void                   << 
1670 raw_atomic_or(int i, atomic_t *v)             << 
1671 {                                                872 {
1672         arch_atomic_or(i, v);                 !! 873         __atomic_release_fence();
                                                   >> 874         return arch_atomic_fetch_andnot_relaxed(i, v);
1673 }                                                875 }
                                                   >> 876 #define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
                                                   >> 877 #endif
1674                                                  878 
1675 /**                                           !! 879 #ifndef arch_atomic_fetch_andnot
1676  * raw_atomic_fetch_or() - atomic bitwise OR  << 
1677  * @i: int value                              << 
1678  * @v: pointer to atomic_t                    << 
1679  *                                            << 
1680  * Atomically updates @v to (@v | @i) with fu << 
1681  *                                            << 
1682  * Safe to use in noinstr code; prefer atomic << 
1683  *                                            << 
1684  * Return: The original value of @v.          << 
1685  */                                           << 
1686 static __always_inline int                       880 static __always_inline int
1687 raw_atomic_fetch_or(int i, atomic_t *v)       !! 881 arch_atomic_fetch_andnot(int i, atomic_t *v)
1688 {                                                882 {
1689 #if defined(arch_atomic_fetch_or)             << 
1690         return arch_atomic_fetch_or(i, v);    << 
1691 #elif defined(arch_atomic_fetch_or_relaxed)   << 
1692         int ret;                                 883         int ret;
1693         __atomic_pre_full_fence();               884         __atomic_pre_full_fence();
1694         ret = arch_atomic_fetch_or_relaxed(i, !! 885         ret = arch_atomic_fetch_andnot_relaxed(i, v);
1695         __atomic_post_full_fence();              886         __atomic_post_full_fence();
1696         return ret;                              887         return ret;
1697 #else                                         << 
1698 #error "Unable to define raw_atomic_fetch_or" << 
1699 #endif                                        << 
1700 }                                                888 }
                                                   >> 889 #define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
                                                   >> 890 #endif
1701                                                  891 
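Because the fetch_or variants return the original value, a caller can set a bit and learn whether it was already set in a single operation. A hedged userspace sketch of that idiom (C11 atomics, hypothetical FLAG_CLAIMED bit):

#include <stdatomic.h>
#include <stdbool.h>

#define FLAG_CLAIMED 0x1        /* hypothetical flag bit for illustration */

/* Returns true only for the caller that actually set the bit. */
bool try_claim(atomic_uint *flags)
{
        unsigned int old = atomic_fetch_or_explicit(flags, FLAG_CLAIMED,
                                                    memory_order_acquire);
        return !(old & FLAG_CLAIMED);
}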
1702 /**                                           !! 892 #endif /* arch_atomic_fetch_andnot_relaxed */
1703  * raw_atomic_fetch_or_acquire() - atomic bit !! 893 
1704  * @i: int value                              !! 894 #ifndef arch_atomic_fetch_or_relaxed
1705  * @v: pointer to atomic_t                    !! 895 #define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
1706  *                                            !! 896 #define arch_atomic_fetch_or_release arch_atomic_fetch_or
1707  * Atomically updates @v to (@v | @i) with ac !! 897 #define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or
1708  *                                            !! 898 #else /* arch_atomic_fetch_or_relaxed */
1709  * Safe to use in noinstr code; prefer atomic !! 899 
1710  *                                            !! 900 #ifndef arch_atomic_fetch_or_acquire
1711  * Return: The original value of @v.          << 
1712  */                                           << 
1713 static __always_inline int                       901 static __always_inline int
1714 raw_atomic_fetch_or_acquire(int i, atomic_t * !! 902 arch_atomic_fetch_or_acquire(int i, atomic_t *v)
1715 {                                                903 {
1716 #if defined(arch_atomic_fetch_or_acquire)     << 
1717         return arch_atomic_fetch_or_acquire(i << 
1718 #elif defined(arch_atomic_fetch_or_relaxed)   << 
1719         int ret = arch_atomic_fetch_or_relaxe    904         int ret = arch_atomic_fetch_or_relaxed(i, v);
1720         __atomic_acquire_fence();                905         __atomic_acquire_fence();
1721         return ret;                              906         return ret;
1722 #elif defined(arch_atomic_fetch_or)           << 
1723         return arch_atomic_fetch_or(i, v);    << 
1724 #else                                         << 
1725 #error "Unable to define raw_atomic_fetch_or_acquire"      << 
1726 #endif                                        << 
1727 }                                                907 }
                                                   >> 908 #define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
                                                   >> 909 #endif
1728                                                  910 
1729 /**                                           !! 911 #ifndef arch_atomic_fetch_or_release
1730  * raw_atomic_fetch_or_release() - atomic bit << 
1731  * @i: int value                              << 
1732  * @v: pointer to atomic_t                    << 
1733  *                                            << 
1734  * Atomically updates @v to (@v | @i) with re << 
1735  *                                            << 
1736  * Safe to use in noinstr code; prefer atomic << 
1737  *                                            << 
1738  * Return: The original value of @v.          << 
1739  */                                           << 
1740 static __always_inline int                       912 static __always_inline int
1741 raw_atomic_fetch_or_release(int i, atomic_t * !! 913 arch_atomic_fetch_or_release(int i, atomic_t *v)
1742 {                                                914 {
1743 #if defined(arch_atomic_fetch_or_release)     << 
1744         return arch_atomic_fetch_or_release(i << 
1745 #elif defined(arch_atomic_fetch_or_relaxed)   << 
1746         __atomic_release_fence();                915         __atomic_release_fence();
1747         return arch_atomic_fetch_or_relaxed(i    916         return arch_atomic_fetch_or_relaxed(i, v);
1748 #elif defined(arch_atomic_fetch_or)           << 
1749         return arch_atomic_fetch_or(i, v);    << 
1750 #else                                         << 
1751 #error "Unable to define raw_atomic_fetch_or_release"      << 
1752 #endif                                        << 
1753 }                                                917 }
1754                                               !! 918 #define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
1755 /**                                           << 
1756  * raw_atomic_fetch_or_relaxed() - atomic bit << 
1757  * @i: int value                              << 
1758  * @v: pointer to atomic_t                    << 
1759  *                                            << 
1760  * Atomically updates @v to (@v | @i) with re << 
1761  *                                            << 
1762  * Safe to use in noinstr code; prefer atomic << 
1763  *                                            << 
1764  * Return: The original value of @v.          << 
1765  */                                           << 
1766 static __always_inline int                    << 
1767 raw_atomic_fetch_or_relaxed(int i, atomic_t * << 
1768 {                                             << 
1769 #if defined(arch_atomic_fetch_or_relaxed)     << 
1770         return arch_atomic_fetch_or_relaxed(i << 
1771 #elif defined(arch_atomic_fetch_or)           << 
1772         return arch_atomic_fetch_or(i, v);    << 
1773 #else                                         << 
1774 #error "Unable to define raw_atomic_fetch_or_relaxed"      << 
1775 #endif                                           919 #endif
1776 }                                             << 
1777                                                  920 
1778 /**                                           !! 921 #ifndef arch_atomic_fetch_or
1779  * raw_atomic_xor() - atomic bitwise XOR with << 
1780  * @i: int value                              << 
1781  * @v: pointer to atomic_t                    << 
1782  *                                            << 
1783  * Atomically updates @v to (@v ^ @i) with re << 
1784  *                                            << 
1785  * Safe to use in noinstr code; prefer atomic << 
1786  *                                            << 
1787  * Return: Nothing.                           << 
1788  */                                           << 
1789 static __always_inline void                   << 
1790 raw_atomic_xor(int i, atomic_t *v)            << 
1791 {                                             << 
1792         arch_atomic_xor(i, v);                << 
1793 }                                             << 
1794                                               << 
1795 /**                                           << 
1796  * raw_atomic_fetch_xor() - atomic bitwise XO << 
1797  * @i: int value                              << 
1798  * @v: pointer to atomic_t                    << 
1799  *                                            << 
1800  * Atomically updates @v to (@v ^ @i) with fu << 
1801  *                                            << 
1802  * Safe to use in noinstr code; prefer atomic << 
1803  *                                            << 
1804  * Return: The original value of @v.          << 
1805  */                                           << 
1806 static __always_inline int                       922 static __always_inline int
1807 raw_atomic_fetch_xor(int i, atomic_t *v)      !! 923 arch_atomic_fetch_or(int i, atomic_t *v)
1808 {                                                924 {
1809 #if defined(arch_atomic_fetch_xor)            << 
1810         return arch_atomic_fetch_xor(i, v);   << 
1811 #elif defined(arch_atomic_fetch_xor_relaxed)  << 
1812         int ret;                                 925         int ret;
1813         __atomic_pre_full_fence();               926         __atomic_pre_full_fence();
1814         ret = arch_atomic_fetch_xor_relaxed(i !! 927         ret = arch_atomic_fetch_or_relaxed(i, v);
1815         __atomic_post_full_fence();              928         __atomic_post_full_fence();
1816         return ret;                              929         return ret;
1817 #else                                         << 
1818 #error "Unable to define raw_atomic_fetch_xor"      << 
1819 #endif                                        << 
1820 }                                                930 }
                                                   >> 931 #define arch_atomic_fetch_or arch_atomic_fetch_or
                                                   >> 932 #endif
1821                                                  933 
1822 /**                                           !! 934 #endif /* arch_atomic_fetch_or_relaxed */
1823  * raw_atomic_fetch_xor_acquire() - atomic bi !! 935 
1824  * @i: int value                              !! 936 #ifndef arch_atomic_fetch_xor_relaxed
1825  * @v: pointer to atomic_t                    !! 937 #define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
1826  *                                            !! 938 #define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
1827  * Atomically updates @v to (@v ^ @i) with ac !! 939 #define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor
1828  *                                            !! 940 #else /* arch_atomic_fetch_xor_relaxed */
1829  * Safe to use in noinstr code; prefer atomic !! 941 
1830  *                                            !! 942 #ifndef arch_atomic_fetch_xor_acquire
1831  * Return: The original value of @v.          << 
1832  */                                           << 
1833 static __always_inline int                       943 static __always_inline int
1834 raw_atomic_fetch_xor_acquire(int i, atomic_t  !! 944 arch_atomic_fetch_xor_acquire(int i, atomic_t *v)
1835 {                                                945 {
1836 #if defined(arch_atomic_fetch_xor_acquire)    << 
1837         return arch_atomic_fetch_xor_acquire( << 
1838 #elif defined(arch_atomic_fetch_xor_relaxed)  << 
1839         int ret = arch_atomic_fetch_xor_relax    946         int ret = arch_atomic_fetch_xor_relaxed(i, v);
1840         __atomic_acquire_fence();                947         __atomic_acquire_fence();
1841         return ret;                              948         return ret;
1842 #elif defined(arch_atomic_fetch_xor)          << 
1843         return arch_atomic_fetch_xor(i, v);   << 
1844 #else                                         << 
1845 #error "Unable to define raw_atomic_fetch_xor_acquire"      << 
1846 #endif                                        << 
1847 }                                                949 }
                                                   >> 950 #define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
                                                   >> 951 #endif
1848                                                  952 
1849 /**                                           !! 953 #ifndef arch_atomic_fetch_xor_release
1850  * raw_atomic_fetch_xor_release() - atomic bi << 
1851  * @i: int value                              << 
1852  * @v: pointer to atomic_t                    << 
1853  *                                            << 
1854  * Atomically updates @v to (@v ^ @i) with re << 
1855  *                                            << 
1856  * Safe to use in noinstr code; prefer atomic << 
1857  *                                            << 
1858  * Return: The original value of @v.          << 
1859  */                                           << 
1860 static __always_inline int                       954 static __always_inline int
1861 raw_atomic_fetch_xor_release(int i, atomic_t  !! 955 arch_atomic_fetch_xor_release(int i, atomic_t *v)
1862 {                                                956 {
1863 #if defined(arch_atomic_fetch_xor_release)    << 
1864         return arch_atomic_fetch_xor_release( << 
1865 #elif defined(arch_atomic_fetch_xor_relaxed)  << 
1866         __atomic_release_fence();                957         __atomic_release_fence();
1867         return arch_atomic_fetch_xor_relaxed(    958         return arch_atomic_fetch_xor_relaxed(i, v);
1868 #elif defined(arch_atomic_fetch_xor)          << 
1869         return arch_atomic_fetch_xor(i, v);   << 
1870 #else                                         << 
1871 #error "Unable to define raw_atomic_fetch_xor_release"      << 
1872 #endif                                        << 
1873 }                                                959 }
1874                                               !! 960 #define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
1875 /**                                           << 
1876  * raw_atomic_fetch_xor_relaxed() - atomic bi << 
1877  * @i: int value                              << 
1878  * @v: pointer to atomic_t                    << 
1879  *                                            << 
1880  * Atomically updates @v to (@v ^ @i) with re << 
1881  *                                            << 
1882  * Safe to use in noinstr code; prefer atomic << 
1883  *                                            << 
1884  * Return: The original value of @v.          << 
1885  */                                           << 
1886 static __always_inline int                    << 
1887 raw_atomic_fetch_xor_relaxed(int i, atomic_t  << 
1888 {                                             << 
1889 #if defined(arch_atomic_fetch_xor_relaxed)    << 
1890         return arch_atomic_fetch_xor_relaxed( << 
1891 #elif defined(arch_atomic_fetch_xor)          << 
1892         return arch_atomic_fetch_xor(i, v);   << 
1893 #else                                         << 
1894 #error "Unable to define raw_atomic_fetch_xor_relaxed"      << 
1895 #endif                                           961 #endif
1896 }                                             << 
1897                                                  962 
1898 /**                                           !! 963 #ifndef arch_atomic_fetch_xor
1899  * raw_atomic_xchg() - atomic exchange with f << 
1900  * @v: pointer to atomic_t                    << 
1901  * @new: int value to assign                  << 
1902  *                                            << 
1903  * Atomically updates @v to @new with full or << 
1904  *                                            << 
1905  * Safe to use in noinstr code; prefer atomic << 
1906  *                                            << 
1907  * Return: The original value of @v.          << 
1908  */                                           << 
1909 static __always_inline int                       964 static __always_inline int
1910 raw_atomic_xchg(atomic_t *v, int new)         !! 965 arch_atomic_fetch_xor(int i, atomic_t *v)
1911 {                                                966 {
1912 #if defined(arch_atomic_xchg)                 << 
1913         return arch_atomic_xchg(v, new);      << 
1914 #elif defined(arch_atomic_xchg_relaxed)       << 
1915         int ret;                                 967         int ret;
1916         __atomic_pre_full_fence();               968         __atomic_pre_full_fence();
1917         ret = arch_atomic_xchg_relaxed(v, new !! 969         ret = arch_atomic_fetch_xor_relaxed(i, v);
1918         __atomic_post_full_fence();              970         __atomic_post_full_fence();
1919         return ret;                              971         return ret;
1920 #else                                         << 
1921         return raw_xchg(&v->counter, new);    << 
1922 #endif                                        << 
1923 }                                                972 }
                                                   >> 973 #define arch_atomic_fetch_xor arch_atomic_fetch_xor
                                                   >> 974 #endif
1924                                                  975 
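raw_atomic_xchg() unconditionally installs @new and hands back the previous value; a common use is draining an accumulated counter or implementing test-and-set style state changes. A minimal C11 sketch (illustration only, not kernel code):

#include <stdatomic.h>

/* Drain a pending-event counter: take whatever accumulated, leave zero. */
int drain_pending(atomic_int *pending)
{
        return atomic_exchange_explicit(pending, 0, memory_order_acq_rel);
}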
1925 /**                                           !! 976 #endif /* arch_atomic_fetch_xor_relaxed */
1926  * raw_atomic_xchg_acquire() - atomic exchang !! 977 
1927  * @v: pointer to atomic_t                    !! 978 #ifndef arch_atomic_xchg_relaxed
1928  * @new: int value to assign                  !! 979 #define arch_atomic_xchg_acquire arch_atomic_xchg
1929  *                                            !! 980 #define arch_atomic_xchg_release arch_atomic_xchg
1930  * Atomically updates @v to @new with acquire !! 981 #define arch_atomic_xchg_relaxed arch_atomic_xchg
1931  *                                            !! 982 #else /* arch_atomic_xchg_relaxed */
1932  * Safe to use in noinstr code; prefer atomic !! 983 
1933  *                                            !! 984 #ifndef arch_atomic_xchg_acquire
1934  * Return: The original value of @v.          << 
1935  */                                           << 
1936 static __always_inline int                       985 static __always_inline int
1937 raw_atomic_xchg_acquire(atomic_t *v, int new) !! 986 arch_atomic_xchg_acquire(atomic_t *v, int i)
1938 {                                                987 {
1939 #if defined(arch_atomic_xchg_acquire)         !! 988         int ret = arch_atomic_xchg_relaxed(v, i);
1940         return arch_atomic_xchg_acquire(v, ne << 
1941 #elif defined(arch_atomic_xchg_relaxed)       << 
1942         int ret = arch_atomic_xchg_relaxed(v, << 
1943         __atomic_acquire_fence();                989         __atomic_acquire_fence();
1944         return ret;                              990         return ret;
1945 #elif defined(arch_atomic_xchg)               << 
1946         return arch_atomic_xchg(v, new);      << 
1947 #else                                         << 
1948         return raw_xchg_acquire(&v->counter,  << 
1949 #endif                                        << 
1950 }                                                991 }
                                                   >> 992 #define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
                                                   >> 993 #endif
1951                                                  994 
1952 /**                                           !! 995 #ifndef arch_atomic_xchg_release
1953  * raw_atomic_xchg_release() - atomic exchang << 
1954  * @v: pointer to atomic_t                    << 
1955  * @new: int value to assign                  << 
1956  *                                            << 
1957  * Atomically updates @v to @new with release << 
1958  *                                            << 
1959  * Safe to use in noinstr code; prefer atomic << 
1960  *                                            << 
1961  * Return: The original value of @v.          << 
1962  */                                           << 
1963 static __always_inline int                       996 static __always_inline int
1964 raw_atomic_xchg_release(atomic_t *v, int new) !! 997 arch_atomic_xchg_release(atomic_t *v, int i)
1965 {                                                998 {
1966 #if defined(arch_atomic_xchg_release)         << 
1967         return arch_atomic_xchg_release(v, ne << 
1968 #elif defined(arch_atomic_xchg_relaxed)       << 
1969         __atomic_release_fence();                999         __atomic_release_fence();
1970         return arch_atomic_xchg_relaxed(v, ne !! 1000         return arch_atomic_xchg_relaxed(v, i);
1971 #elif defined(arch_atomic_xchg)               << 
1972         return arch_atomic_xchg(v, new);      << 
1973 #else                                         << 
1974         return raw_xchg_release(&v->counter,  << 
1975 #endif                                        << 
1976 }                                                1001 }
1977                                               !! 1002 #define arch_atomic_xchg_release arch_atomic_xchg_release
1978 /**                                           << 
1979  * raw_atomic_xchg_relaxed() - atomic exchang << 
1980  * @v: pointer to atomic_t                    << 
1981  * @new: int value to assign                  << 
1982  *                                            << 
1983  * Atomically updates @v to @new with relaxed << 
1984  *                                            << 
1985  * Safe to use in noinstr code; prefer atomic << 
1986  *                                            << 
1987  * Return: The original value of @v.          << 
1988  */                                           << 
1989 static __always_inline int                    << 
1990 raw_atomic_xchg_relaxed(atomic_t *v, int new) << 
1991 {                                             << 
1992 #if defined(arch_atomic_xchg_relaxed)         << 
1993         return arch_atomic_xchg_relaxed(v, ne << 
1994 #elif defined(arch_atomic_xchg)               << 
1995         return arch_atomic_xchg(v, new);      << 
1996 #else                                         << 
1997         return raw_xchg_relaxed(&v->counter,  << 
1998 #endif                                           1003 #endif
1999 }                                             << 
2000                                                  1004 
2001 /**                                           !! 1005 #ifndef arch_atomic_xchg
2002  * raw_atomic_cmpxchg() - atomic compare and  << 
2003  * @v: pointer to atomic_t                    << 
2004  * @old: int value to compare with            << 
2005  * @new: int value to assign                  << 
2006  *                                            << 
2007  * If (@v == @old), atomically updates @v to  << 
2008  * Otherwise, @v is not modified and relaxed  << 
2009  *                                            << 
2010  * Safe to use in noinstr code; prefer atomic << 
2011  *                                            << 
2012  * Return: The original value of @v.          << 
2013  */                                           << 
2014 static __always_inline int                       1006 static __always_inline int
2015 raw_atomic_cmpxchg(atomic_t *v, int old, int  !! 1007 arch_atomic_xchg(atomic_t *v, int i)
2016 {                                                1008 {
2017 #if defined(arch_atomic_cmpxchg)              << 
2018         return arch_atomic_cmpxchg(v, old, ne << 
2019 #elif defined(arch_atomic_cmpxchg_relaxed)    << 
2020         int ret;                                 1009         int ret;
2021         __atomic_pre_full_fence();               1010         __atomic_pre_full_fence();
2022         ret = arch_atomic_cmpxchg_relaxed(v,  !! 1011         ret = arch_atomic_xchg_relaxed(v, i);
2023         __atomic_post_full_fence();              1012         __atomic_post_full_fence();
2024         return ret;                              1013         return ret;
2025 #else                                         << 
2026         return raw_cmpxchg(&v->counter, old,  << 
2027 #endif                                        << 
2028 }                                                1014 }
                                                   >> 1015 #define arch_atomic_xchg arch_atomic_xchg
                                                   >> 1016 #endif
2029                                                  1017 
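cmpxchg() reports back the value that was actually found in @v, so the usual calling pattern is a read-modify-retry loop that recomputes from whatever it got back. A hedged C11 sketch of that loop (the bounded-increment policy and the helper name are invented for illustration):

#include <stdatomic.h>

/* cmpxchg()-style helper: returns the value previously in *v,
 * whether or not the store happened (C11 illustration only). */
static int cmpxchg_like(atomic_int *v, int old, int new)
{
        atomic_compare_exchange_strong_explicit(v, &old, new,
                        memory_order_seq_cst, memory_order_seq_cst);
        return old;     /* on failure, C11 wrote the observed value here */
}

/* Typical retry loop: recompute from the value cmpxchg() reports back. */
int bounded_inc(atomic_int *v, int limit)
{
        int cur = atomic_load_explicit(v, memory_order_relaxed);
        int old;

        do {
                if (cur >= limit)
                        return cur;
                old = cur;
                cur = cmpxchg_like(v, old, old + 1);
        } while (cur != old);

        return old;
}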
2030 /**                                           !! 1018 #endif /* arch_atomic_xchg_relaxed */
2031  * raw_atomic_cmpxchg_acquire() - atomic comp !! 1019 
2032  * @v: pointer to atomic_t                    !! 1020 #ifndef arch_atomic_cmpxchg_relaxed
2033  * @old: int value to compare with            !! 1021 #define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg
2034  * @new: int value to assign                  !! 1022 #define arch_atomic_cmpxchg_release arch_atomic_cmpxchg
2035  *                                            !! 1023 #define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg
2036  * If (@v == @old), atomically updates @v to  !! 1024 #else /* arch_atomic_cmpxchg_relaxed */
2037  * Otherwise, @v is not modified and relaxed  !! 1025 
2038  *                                            !! 1026 #ifndef arch_atomic_cmpxchg_acquire
2039  * Safe to use in noinstr code; prefer atomic << 
2040  *                                            << 
2041  * Return: The original value of @v.          << 
2042  */                                           << 
2043 static __always_inline int                       1027 static __always_inline int
2044 raw_atomic_cmpxchg_acquire(atomic_t *v, int o !! 1028 arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
2045 {                                                1029 {
2046 #if defined(arch_atomic_cmpxchg_acquire)      << 
2047         return arch_atomic_cmpxchg_acquire(v, << 
2048 #elif defined(arch_atomic_cmpxchg_relaxed)    << 
2049         int ret = arch_atomic_cmpxchg_relaxed    1030         int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
2050         __atomic_acquire_fence();                1031         __atomic_acquire_fence();
2051         return ret;                              1032         return ret;
2052 #elif defined(arch_atomic_cmpxchg)            << 
2053         return arch_atomic_cmpxchg(v, old, ne << 
2054 #else                                         << 
2055         return raw_cmpxchg_acquire(&v->counte << 
2056 #endif                                        << 
2057 }                                                1033 }
                                                   >> 1034 #define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
                                                   >> 1035 #endif
2058                                                  1036 
2059 /**                                           !! 1037 #ifndef arch_atomic_cmpxchg_release
2060  * raw_atomic_cmpxchg_release() - atomic comp << 
2061  * @v: pointer to atomic_t                    << 
2062  * @old: int value to compare with            << 
2063  * @new: int value to assign                  << 
2064  *                                            << 
2065  * If (@v == @old), atomically updates @v to  << 
2066  * Otherwise, @v is not modified and relaxed  << 
2067  *                                            << 
2068  * Safe to use in noinstr code; prefer atomic << 
2069  *                                            << 
2070  * Return: The original value of @v.          << 
2071  */                                           << 
2072 static __always_inline int                       1038 static __always_inline int
2073 raw_atomic_cmpxchg_release(atomic_t *v, int o !! 1039 arch_atomic_cmpxchg_release(atomic_t *v, int old, int new)
2074 {                                                1040 {
2075 #if defined(arch_atomic_cmpxchg_release)      << 
2076         return arch_atomic_cmpxchg_release(v, << 
2077 #elif defined(arch_atomic_cmpxchg_relaxed)    << 
2078         __atomic_release_fence();                1041         __atomic_release_fence();
2079         return arch_atomic_cmpxchg_relaxed(v,    1042         return arch_atomic_cmpxchg_relaxed(v, old, new);
2080 #elif defined(arch_atomic_cmpxchg)            << 
2081         return arch_atomic_cmpxchg(v, old, ne << 
2082 #else                                         << 
2083         return raw_cmpxchg_release(&v->counte << 
2084 #endif                                        << 
2085 }                                                1043 }
                                                   >> 1044 #define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
                                                   >> 1045 #endif
2086                                                  1046 
2087 /**                                           !! 1047 #ifndef arch_atomic_cmpxchg
2088  * raw_atomic_cmpxchg_relaxed() - atomic comp << 
2089  * @v: pointer to atomic_t                    << 
2090  * @old: int value to compare with            << 
2091  * @new: int value to assign                  << 
2092  *                                            << 
2093  * If (@v == @old), atomically updates @v to  << 
2094  * Otherwise, @v is not modified and relaxed  << 
2095  *                                            << 
2096  * Safe to use in noinstr code; prefer atomic << 
2097  *                                            << 
2098  * Return: The original value of @v.          << 
2099  */                                           << 
2100 static __always_inline int                       1048 static __always_inline int
2101 raw_atomic_cmpxchg_relaxed(atomic_t *v, int o !! 1049 arch_atomic_cmpxchg(atomic_t *v, int old, int new)
2102 {                                                1050 {
2103 #if defined(arch_atomic_cmpxchg_relaxed)      !! 1051         int ret;
2104         return arch_atomic_cmpxchg_relaxed(v, !! 1052         __atomic_pre_full_fence();
2105 #elif defined(arch_atomic_cmpxchg)            !! 1053         ret = arch_atomic_cmpxchg_relaxed(v, old, new);
2106         return arch_atomic_cmpxchg(v, old, ne !! 1054         __atomic_post_full_fence();
2107 #else                                         !! 1055         return ret;
2108         return raw_cmpxchg_relaxed(&v->counte << 
2109 #endif                                        << 
2110 }                                                1056 }
                                                   >> 1057 #define arch_atomic_cmpxchg arch_atomic_cmpxchg
                                                   >> 1058 #endif
2111                                                  1059 
2112 /**                                           !! 1060 #endif /* arch_atomic_cmpxchg_relaxed */
2113  * raw_atomic_try_cmpxchg() - atomic compare  !! 1061 
2114  * @v: pointer to atomic_t                    !! 1062 #ifndef arch_atomic_try_cmpxchg_relaxed
2115  * @old: pointer to int value to compare with !! 1063 #ifdef arch_atomic_try_cmpxchg
2116  * @new: int value to assign                  !! 1064 #define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg
2117  *                                            !! 1065 #define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg
2118  * If (@v == @old), atomically updates @v to  !! 1066 #define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg
2119  * Otherwise, @v is not modified, @old is upd !! 1067 #endif /* arch_atomic_try_cmpxchg */
2120  * and relaxed ordering is provided.          !! 1068 
2121  *                                            !! 1069 #ifndef arch_atomic_try_cmpxchg
2122  * Safe to use in noinstr code; prefer atomic << 
2123  *                                            << 
2124  * Return: @true if the exchange occurred, @false otherwise.      << 
2125  */                                           << 
2126 static __always_inline bool                      1070 static __always_inline bool
2127 raw_atomic_try_cmpxchg(atomic_t *v, int *old, !! 1071 arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
2128 {                                                1072 {
2129 #if defined(arch_atomic_try_cmpxchg)          << 
2130         return arch_atomic_try_cmpxchg(v, old << 
2131 #elif defined(arch_atomic_try_cmpxchg_relaxed << 
2132         bool ret;                             << 
2133         __atomic_pre_full_fence();            << 
2134         ret = arch_atomic_try_cmpxchg_relaxed << 
2135         __atomic_post_full_fence();           << 
2136         return ret;                           << 
2137 #else                                         << 
2138         int r, o = *old;                         1073         int r, o = *old;
2139         r = raw_atomic_cmpxchg(v, o, new);    !! 1074         r = arch_atomic_cmpxchg(v, o, new);
2140         if (unlikely(r != o))                    1075         if (unlikely(r != o))
2141                 *old = r;                        1076                 *old = r;
2142         return likely(r == o);                   1077         return likely(r == o);
2143 #endif                                        << 
2144 }                                                1078 }
                                                   >> 1079 #define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
                                                   >> 1080 #endif
2145                                                  1081 
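try_cmpxchg() differs from cmpxchg() in that it returns a success flag and, on failure, writes the observed value back through @old, which is exactly the shape of C11 atomic_compare_exchange_*. That makes retry loops read more naturally; a hedged sketch (the add-unless-zero policy is chosen purely as an example):

#include <stdatomic.h>
#include <stdbool.h>

/* Add 'delta' only while the counter is non-zero (illustrative policy). */
bool add_unless_zero(atomic_int *v, int delta)
{
        int old = atomic_load_explicit(v, memory_order_relaxed);

        do {
                if (old == 0)
                        return false;
                /* On failure, 'old' is refreshed with the current value,
                 * mirroring try_cmpxchg() updating *old above. */
        } while (!atomic_compare_exchange_weak_explicit(v, &old, old + delta,
                        memory_order_relaxed, memory_order_relaxed));

        return true;
}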
2146 /**                                           !! 1082 #ifndef arch_atomic_try_cmpxchg_acquire
2147  * raw_atomic_try_cmpxchg_acquire() - atomic  << 
2148  * @v: pointer to atomic_t                    << 
2149  * @old: pointer to int value to compare with << 
2150  * @new: int value to assign                  << 
2151  *                                            << 
2152  * If (@v == @old), atomically updates @v to  << 
2153  * Otherwise, @v is not modified, @old is upd << 
2154  * and relaxed ordering is provided.          << 
2155  *                                            << 
2156  * Safe to use in noinstr code; prefer atomic << 
2157  *                                            << 
2158  * Return: @true if the exchange occurred, @false otherwise.      << 
2159  */                                           << 
2160 static __always_inline bool                      1083 static __always_inline bool
2161 raw_atomic_try_cmpxchg_acquire(atomic_t *v, i !! 1084 arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
2162 {                                                1085 {
2163 #if defined(arch_atomic_try_cmpxchg_acquire)  << 
2164         return arch_atomic_try_cmpxchg_acquir << 
2165 #elif defined(arch_atomic_try_cmpxchg_relaxed << 
2166         bool ret = arch_atomic_try_cmpxchg_re << 
2167         __atomic_acquire_fence();             << 
2168         return ret;                           << 
2169 #elif defined(arch_atomic_try_cmpxchg)        << 
2170         return arch_atomic_try_cmpxchg(v, old << 
2171 #else                                         << 
2172         int r, o = *old;                         1086         int r, o = *old;
2173         r = raw_atomic_cmpxchg_acquire(v, o,  !! 1087         r = arch_atomic_cmpxchg_acquire(v, o, new);
2174         if (unlikely(r != o))                    1088         if (unlikely(r != o))
2175                 *old = r;                        1089                 *old = r;
2176         return likely(r == o);                   1090         return likely(r == o);
2177 #endif                                        << 
2178 }                                                1091 }
                                                   >> 1092 #define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
                                                   >> 1093 #endif
2179                                                  1094 
2180 /**                                           !! 1095 #ifndef arch_atomic_try_cmpxchg_release
2181  * raw_atomic_try_cmpxchg_release() - atomic  << 
2182  * @v: pointer to atomic_t                    << 
2183  * @old: pointer to int value to compare with << 
2184  * @new: int value to assign                  << 
2185  *                                            << 
2186  * If (@v == @old), atomically updates @v to  << 
2187  * Otherwise, @v is not modified, @old is upd << 
2188  * and relaxed ordering is provided.          << 
2189  *                                            << 
2190  * Safe to use in noinstr code; prefer atomic << 
2191  *                                            << 
2192  * Return: @true if the exchange occurred, @false otherwise.      << 
2193  */                                           << 
2194 static __always_inline bool                      1096 static __always_inline bool
2195 raw_atomic_try_cmpxchg_release(atomic_t *v, i !! 1097 arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
2196 {                                                1098 {
2197 #if defined(arch_atomic_try_cmpxchg_release)  << 
2198         return arch_atomic_try_cmpxchg_releas << 
2199 #elif defined(arch_atomic_try_cmpxchg_relaxed << 
2200         __atomic_release_fence();             << 
2201         return arch_atomic_try_cmpxchg_relaxe << 
2202 #elif defined(arch_atomic_try_cmpxchg)        << 
2203         return arch_atomic_try_cmpxchg(v, old << 
2204 #else                                         << 
2205         int r, o = *old;                         1099         int r, o = *old;
2206         r = raw_atomic_cmpxchg_release(v, o,  !! 1100         r = arch_atomic_cmpxchg_release(v, o, new);
2207         if (unlikely(r != o))                    1101         if (unlikely(r != o))
2208                 *old = r;                        1102                 *old = r;
2209         return likely(r == o);                   1103         return likely(r == o);
2210 #endif                                        << 
2211 }                                                1104 }
                                                   >> 1105 #define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
                                                   >> 1106 #endif
2212                                                  1107 
2213 /**                                           !! 1108 #ifndef arch_atomic_try_cmpxchg_relaxed
2214  * raw_atomic_try_cmpxchg_relaxed() - atomic  << 
2215  * @v: pointer to atomic_t                    << 
2216  * @old: pointer to int value to compare with << 
2217  * @new: int value to assign                  << 
2218  *                                            << 
2219  * If (@v == @old), atomically updates @v to  << 
2220  * Otherwise, @v is not modified, @old is upd << 
2221  * and relaxed ordering is provided.          << 
2222  *                                            << 
2223  * Safe to use in noinstr code; prefer atomic << 
2224  *                                            << 
2225  * Return: @true if the exchange occurred, @fa <<
2226  */                                           << 
2227 static __always_inline bool                      1109 static __always_inline bool
2228 raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) !! 1110 arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
2229 {                                                1111 {
2230 #if defined(arch_atomic_try_cmpxchg_relaxed)  << 
2231         return arch_atomic_try_cmpxchg_relaxe << 
2232 #elif defined(arch_atomic_try_cmpxchg)        << 
2233         return arch_atomic_try_cmpxchg(v, old << 
2234 #else                                         << 
2235         int r, o = *old;                         1112         int r, o = *old;
2236         r = raw_atomic_cmpxchg_relaxed(v, o, new); !! 1113         r = arch_atomic_cmpxchg_relaxed(v, o, new);
2237         if (unlikely(r != o))                    1114         if (unlikely(r != o))
2238                 *old = r;                        1115                 *old = r;
2239         return likely(r == o);                   1116         return likely(r == o);
2240 #endif                                        << 
2241 }                                                1117 }
                                                   >> 1118 #define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed
                                                   >> 1119 #endif
2242                                                  1120 
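/*
 * Editor's illustrative sketch, not part of the generated header: the
 * try_cmpxchg() forms above are intended for compare-and-swap loops.  A
 * failed attempt refreshes *old with the value actually observed, so the
 * loop does not need to re-read the variable.  my_add_below_limit() is a
 * hypothetical helper; raw_atomic_read() and
 * raw_atomic_try_cmpxchg_relaxed() are operations defined in this file.
 */
static inline bool my_add_below_limit(atomic_t *v, int a, int limit)
{
        int old = raw_atomic_read(v);

        do {
                if (old + a > limit)
                        return false;   /* adding would exceed the limit */
        } while (!raw_atomic_try_cmpxchg_relaxed(v, &old, old + a));

        return true;
}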
2243 /**                                           !! 1121 #else /* arch_atomic_try_cmpxchg_relaxed */
2244  * raw_atomic_sub_and_test() - atomic subtrac !! 1122 
2245  * @i: int value to subtract                  !! 1123 #ifndef arch_atomic_try_cmpxchg_acquire
2246  * @v: pointer to atomic_t                    << 
2247  *                                            << 
2248  * Atomically updates @v to (@v - @i) with fu << 
2249  *                                            << 
2250  * Safe to use in noinstr code; prefer atomic << 
2251  *                                            << 
2252  * Return: @true if the resulting value of @v << 
2253  */                                           << 
2254 static __always_inline bool                      1124 static __always_inline bool
2255 raw_atomic_sub_and_test(int i, atomic_t *v)   !! 1125 arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
2256 {                                                1126 {
2257 #if defined(arch_atomic_sub_and_test)         !! 1127         bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
2258         return arch_atomic_sub_and_test(i, v) !! 1128         __atomic_acquire_fence();
2259 #else                                         !! 1129         return ret;
2260         return raw_atomic_sub_return(i, v) == 0; <<
2261 #endif                                        << 
2262 }                                                1130 }
                                                   >> 1131 #define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
                                                   >> 1132 #endif
2263                                                  1133 
2264 /**                                           !! 1134 #ifndef arch_atomic_try_cmpxchg_release
2265  * raw_atomic_dec_and_test() - atomic decreme << 
2266  * @v: pointer to atomic_t                    << 
2267  *                                            << 
2268  * Atomically updates @v to (@v - 1) with ful << 
2269  *                                            << 
2270  * Safe to use in noinstr code; prefer atomic << 
2271  *                                            << 
2272  * Return: @true if the resulting value of @v << 
2273  */                                           << 
2274 static __always_inline bool                      1135 static __always_inline bool
2275 raw_atomic_dec_and_test(atomic_t *v)          !! 1136 arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
2276 {                                                1137 {
2277 #if defined(arch_atomic_dec_and_test)         !! 1138         __atomic_release_fence();
2278         return arch_atomic_dec_and_test(v);   !! 1139         return arch_atomic_try_cmpxchg_relaxed(v, old, new);
2279 #else                                         << 
2280         return raw_atomic_dec_return(v) == 0; << 
2281 #endif                                        << 
2282 }                                                1140 }
                                                   >> 1141 #define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
                                                   >> 1142 #endif
2283                                                  1143 
2284 /**                                           !! 1144 #ifndef arch_atomic_try_cmpxchg
2285  * raw_atomic_inc_and_test() - atomic increme << 
2286  * @v: pointer to atomic_t                    << 
2287  *                                            << 
2288  * Atomically updates @v to (@v + 1) with ful << 
2289  *                                            << 
2290  * Safe to use in noinstr code; prefer atomic << 
2291  *                                            << 
2292  * Return: @true if the resulting value of @v << 
2293  */                                           << 
2294 static __always_inline bool                      1145 static __always_inline bool
2295 raw_atomic_inc_and_test(atomic_t *v)          !! 1146 arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
2296 {                                                1147 {
2297 #if defined(arch_atomic_inc_and_test)         !! 1148         bool ret;
2298         return arch_atomic_inc_and_test(v);   !! 1149         __atomic_pre_full_fence();
2299 #else                                         !! 1150         ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
2300         return raw_atomic_inc_return(v) == 0; !! 1151         __atomic_post_full_fence();
2301 #endif                                        !! 1152         return ret;
2302 }                                                1153 }
                                                   >> 1154 #define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
                                                   >> 1155 #endif
                                                   >> 1156 
                                                   >> 1157 #endif /* arch_atomic_try_cmpxchg_relaxed */
2303                                                  1158 
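/*
 * Editor's illustrative sketch (struct my_obj, my_obj_put() and
 * my_obj_free() are hypothetical): dec_and_test() is the classic building
 * block for reference counting.  Only the thread that drops the final
 * reference sees 'true', and the full ordering documented above ensures
 * its earlier accesses happen before the object is torn down.
 */
struct my_obj {
        atomic_t refcnt;
};

static void my_obj_free(struct my_obj *obj);    /* hypothetical destructor */

static inline void my_obj_put(struct my_obj *obj)
{
        if (raw_atomic_dec_and_test(&obj->refcnt))
                my_obj_free(obj);
}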
                                                   >> 1159 #ifndef arch_atomic_sub_and_test
2304 /**                                              1160 /**
2305  * raw_atomic_add_negative() - atomic add and !! 1161  * arch_atomic_sub_and_test - subtract value from variable and test result
2306  * @i: int value to add                       !! 1162  * @i: integer value to subtract
2307  * @v: pointer to atomic_t                    !! 1163  * @v: pointer of type atomic_t
2308  *                                            << 
2309  * Atomically updates @v to (@v + @i) with fu << 
2310  *                                            << 
2311  * Safe to use in noinstr code; prefer atomic << 
2312  *                                               1164  *
2313  * Return: @true if the resulting value of @v !! 1165  * Atomically subtracts @i from @v and returns
                                                   >> 1166  * true if the result is zero, or false for all
                                                   >> 1167  * other cases.
2314  */                                              1168  */
2315 static __always_inline bool                      1169 static __always_inline bool
2316 raw_atomic_add_negative(int i, atomic_t *v)   !! 1170 arch_atomic_sub_and_test(int i, atomic_t *v)
2317 {                                                1171 {
2318 #if defined(arch_atomic_add_negative)         !! 1172         return arch_atomic_sub_return(i, v) == 0;
2319         return arch_atomic_add_negative(i, v) << 
2320 #elif defined(arch_atomic_add_negative_relaxe << 
2321         bool ret;                             << 
2322         __atomic_pre_full_fence();            << 
2323         ret = arch_atomic_add_negative_relaxe << 
2324         __atomic_post_full_fence();           << 
2325         return ret;                           << 
2326 #else                                         << 
2327         return raw_atomic_add_return(i, v) < 0; <<
2328 #endif                                        << 
2329 }                                                1173 }
                                                   >> 1174 #define arch_atomic_sub_and_test arch_atomic_sub_and_test
                                                   >> 1175 #endif
2330                                                  1176 
                                                   >> 1177 #ifndef arch_atomic_dec_and_test
2331 /**                                              1178 /**
2332  * raw_atomic_add_negative_acquire() - atomic !! 1179  * arch_atomic_dec_and_test - decrement and test
2333  * @i: int value to add                       !! 1180  * @v: pointer of type atomic_t
2334  * @v: pointer to atomic_t                    << 
2335  *                                               1181  *
2336  * Atomically updates @v to (@v + @i) with ac !! 1182  * Atomically decrements @v by 1 and
2337  *                                            !! 1183  * returns true if the result is 0, or false for all other
2338  * Safe to use in noinstr code; prefer atomic !! 1184  * cases.
2339  *                                            << 
2340  * Return: @true if the resulting value of @v << 
2341  */                                              1185  */
2342 static __always_inline bool                      1186 static __always_inline bool
2343 raw_atomic_add_negative_acquire(int i, atomic !! 1187 arch_atomic_dec_and_test(atomic_t *v)
2344 {                                                1188 {
2345 #if defined(arch_atomic_add_negative_acquire) !! 1189         return arch_atomic_dec_return(v) == 0;
2346         return arch_atomic_add_negative_acqui << 
2347 #elif defined(arch_atomic_add_negative_relaxe << 
2348         bool ret = arch_atomic_add_negative_r << 
2349         __atomic_acquire_fence();             << 
2350         return ret;                           << 
2351 #elif defined(arch_atomic_add_negative)       << 
2352         return arch_atomic_add_negative(i, v) << 
2353 #else                                         << 
2354         return raw_atomic_add_return_acquire(i, v) < 0; <<
2355 #endif                                        << 
2356 }                                                1190 }
                                                   >> 1191 #define arch_atomic_dec_and_test arch_atomic_dec_and_test
                                                   >> 1192 #endif
2357                                                  1193 
                                                   >> 1194 #ifndef arch_atomic_inc_and_test
2358 /**                                              1195 /**
2359  * raw_atomic_add_negative_release() - atomic !! 1196  * arch_atomic_inc_and_test - increment and test
2360  * @i: int value to add                       !! 1197  * @v: pointer of type atomic_t
2361  * @v: pointer to atomic_t                    << 
2362  *                                            << 
2363  * Atomically updates @v to (@v + @i) with re << 
2364  *                                               1198  *
2365  * Safe to use in noinstr code; prefer atomic !! 1199  * Atomically increments @v by 1
2366  *                                            !! 1200  * and returns true if the result is zero, or false for all
2367  * Return: @true if the resulting value of @v !! 1201  * other cases.
2368  */                                              1202  */
2369 static __always_inline bool                      1203 static __always_inline bool
2370 raw_atomic_add_negative_release(int i, atomic !! 1204 arch_atomic_inc_and_test(atomic_t *v)
2371 {                                                1205 {
2372 #if defined(arch_atomic_add_negative_release) !! 1206         return arch_atomic_inc_return(v) == 0;
2373         return arch_atomic_add_negative_relea << 
2374 #elif defined(arch_atomic_add_negative_relaxe << 
2375         __atomic_release_fence();             << 
2376         return arch_atomic_add_negative_relax << 
2377 #elif defined(arch_atomic_add_negative)       << 
2378         return arch_atomic_add_negative(i, v) << 
2379 #else                                         << 
2380         return raw_atomic_add_return_release(i, v) < 0; <<
2381 #endif                                        << 
2382 }                                                1207 }
                                                   >> 1208 #define arch_atomic_inc_and_test arch_atomic_inc_and_test
                                                   >> 1209 #endif
2383                                                  1210 
                                                   >> 1211 #ifndef arch_atomic_add_negative
2384 /**                                              1212 /**
2385  * raw_atomic_add_negative_relaxed() - atomic !! 1213  * arch_atomic_add_negative - add and test if negative
2386  * @i: int value to add                       !! 1214  * @i: integer value to add
2387  * @v: pointer to atomic_t                    !! 1215  * @v: pointer of type atomic_t
2388  *                                            << 
2389  * Atomically updates @v to (@v + @i) with re << 
2390  *                                               1216  *
2391  * Safe to use in noinstr code; prefer atomic !! 1217  * Atomically adds @i to @v and returns true
2392  *                                            !! 1218  * if the result is negative, or false when
2393  * Return: @true if the resulting value of @v !! 1219  * result is greater than or equal to zero.
2394  */                                              1220  */
2395 static __always_inline bool                      1221 static __always_inline bool
2396 raw_atomic_add_negative_relaxed(int i, atomic !! 1222 arch_atomic_add_negative(int i, atomic_t *v)
2397 {                                                1223 {
2398 #if defined(arch_atomic_add_negative_relaxed) !! 1224         return arch_atomic_add_return(i, v) < 0;
2399         return arch_atomic_add_negative_relax << 
2400 #elif defined(arch_atomic_add_negative)       << 
2401         return arch_atomic_add_negative(i, v) << 
2402 #else                                         << 
2403         return raw_atomic_add_return_relaxed(i, v) < 0; <<
2404 #endif                                        << 
2405 }                                                1225 }
                                                   >> 1226 #define arch_atomic_add_negative arch_atomic_add_negative
                                                   >> 1227 #endif
2406                                                  1228 
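/*
 * Editor's illustrative sketch (my_charge() is hypothetical): the
 * add_negative() family reports, in one atomic step, whether a signed
 * counter dropped below zero after the addition - useful when remaining
 * budget is tracked as a signed value.
 */
static inline bool my_charge(atomic_t *budget, int pages)
{
        /* true means the budget went negative and the caller overcommitted */
        return raw_atomic_add_negative(-pages, budget);
}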
                                                   >> 1229 #ifndef arch_atomic_fetch_add_unless
2407 /**                                              1230 /**
2408  * raw_atomic_fetch_add_unless() - atomic add !! 1231  * arch_atomic_fetch_add_unless - add unless the number is already a given value
2409  * @v: pointer to atomic_t                    !! 1232  * @v: pointer of type atomic_t
2410  * @a: int value to add                       !! 1233  * @a: the amount to add to v...
2411  * @u: int value to compare with              !! 1234  * @u: ...unless v is equal to u.
2412  *                                            << 
2413  * If (@v != @u), atomically updates @v to (@ << 
2414  * Otherwise, @v is not modified and relaxed  << 
2415  *                                            << 
2416  * Safe to use in noinstr code; prefer atomic << 
2417  *                                               1235  *
2418  * Return: The original value of @v.          !! 1236  * Atomically adds @a to @v, so long as @v was not already @u.
                                                   >> 1237  * Returns original value of @v
2419  */                                              1238  */
2420 static __always_inline int                       1239 static __always_inline int
2421 raw_atomic_fetch_add_unless(atomic_t *v, int a, int u) !! 1240 arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
2422 {                                                1241 {
2423 #if defined(arch_atomic_fetch_add_unless)     !! 1242         int c = arch_atomic_read(v);
2424         return arch_atomic_fetch_add_unless(v << 
2425 #else                                         << 
2426         int c = raw_atomic_read(v);           << 
2427                                                  1243 
2428         do {                                     1244         do {
2429                 if (unlikely(c == u))            1245                 if (unlikely(c == u))
2430                         break;                   1246                         break;
2431         } while (!raw_atomic_try_cmpxchg(v, &c, c + a)); !! 1247         } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
2432                                                  1248 
2433         return c;                                1249         return c;
2434 #endif                                        << 
2435 }                                                1250 }
                                                   >> 1251 #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
                                                   >> 1252 #endif
2436                                                  1253 
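/*
 * Editor's illustrative sketch (my_get_unless_saturated() is hypothetical):
 * because fetch_add_unless() returns the old value, the caller can tell
 * whether the increment actually happened by comparing against @u.
 */
static inline bool my_get_unless_saturated(atomic_t *users, int max)
{
        return raw_atomic_fetch_add_unless(users, 1, max) != max;
}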
                                                   >> 1254 #ifndef arch_atomic_add_unless
2437 /**                                              1255 /**
2438  * raw_atomic_add_unless() - atomic add unles !! 1256  * arch_atomic_add_unless - add unless the number is already a given value
2439  * @v: pointer to atomic_t                    !! 1257  * @v: pointer of type atomic_t
2440  * @a: int value to add                       !! 1258  * @a: the amount to add to v...
2441  * @u: int value to compare with              !! 1259  * @u: ...unless v is equal to u.
2442  *                                            << 
2443  * If (@v != @u), atomically updates @v to (@ << 
2444  * Otherwise, @v is not modified and relaxed  << 
2445  *                                            << 
2446  * Safe to use in noinstr code; prefer atomic << 
2447  *                                               1260  *
2448  * Return: @true if @v was updated, @false ot !! 1261  * Atomically adds @a to @v, if @v was not already @u.
                                                   >> 1262  * Returns true if the addition was done.
2449  */                                              1263  */
2450 static __always_inline bool                      1264 static __always_inline bool
2451 raw_atomic_add_unless(atomic_t *v, int a, int u) !! 1265 arch_atomic_add_unless(atomic_t *v, int a, int u)
2452 {                                                1266 {
2453 #if defined(arch_atomic_add_unless)           !! 1267         return arch_atomic_fetch_add_unless(v, a, u) != u;
2454         return arch_atomic_add_unless(v, a, u << 
2455 #else                                         << 
2456         return raw_atomic_fetch_add_unless(v, a, u) != u; <<
2457 #endif                                        << 
2458 }                                                1268 }
                                                   >> 1269 #define arch_atomic_add_unless arch_atomic_add_unless
                                                   >> 1270 #endif
2459                                                  1271 
                                                   >> 1272 #ifndef arch_atomic_inc_not_zero
2460 /**                                              1273 /**
2461  * raw_atomic_inc_not_zero() - atomic increme !! 1274  * arch_atomic_inc_not_zero - increment unless the number is zero
2462  * @v: pointer to atomic_t                    !! 1275  * @v: pointer of type atomic_t
2463  *                                            << 
2464  * If (@v != 0), atomically updates @v to (@v << 
2465  * Otherwise, @v is not modified and relaxed  << 
2466  *                                               1276  *
2467  * Safe to use in noinstr code; prefer atomic !! 1277  * Atomically increments @v by 1, if @v is non-zero.
2468  *                                            !! 1278  * Returns true if the increment was done.
2469  * Return: @true if @v was updated, @false ot << 
2470  */                                              1279  */
2471 static __always_inline bool                      1280 static __always_inline bool
2472 raw_atomic_inc_not_zero(atomic_t *v)          !! 1281 arch_atomic_inc_not_zero(atomic_t *v)
2473 {                                                1282 {
2474 #if defined(arch_atomic_inc_not_zero)         !! 1283         return arch_atomic_add_unless(v, 1, 0);
2475         return arch_atomic_inc_not_zero(v);   << 
2476 #else                                         << 
2477         return raw_atomic_add_unless(v, 1, 0); <<
2478 #endif                                        << 
2479 }                                                1284 }
                                                   >> 1285 #define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
                                                   >> 1286 #endif
2480                                                  1287 
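/*
 * Editor's illustrative sketch (my_tryget() is hypothetical):
 * inc_not_zero() is the usual way to take a reference on an object whose
 * last reference may be dropped concurrently - a refcount of zero means
 * the object is already on its way to being freed, so the get must fail.
 */
static inline bool my_tryget(atomic_t *refcnt)
{
        return raw_atomic_inc_not_zero(refcnt);
}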
2481 /**                                           !! 1288 #ifndef arch_atomic_inc_unless_negative
2482  * raw_atomic_inc_unless_negative() - atomic  << 
2483  * @v: pointer to atomic_t                    << 
2484  *                                            << 
2485  * If (@v >= 0), atomically updates @v to (@v << 
2486  * Otherwise, @v is not modified and relaxed  << 
2487  *                                            << 
2488  * Safe to use in noinstr code; prefer atomic << 
2489  *                                            << 
2490  * Return: @true if @v was updated, @false ot << 
2491  */                                           << 
2492 static __always_inline bool                      1289 static __always_inline bool
2493 raw_atomic_inc_unless_negative(atomic_t *v)   !! 1290 arch_atomic_inc_unless_negative(atomic_t *v)
2494 {                                                1291 {
2495 #if defined(arch_atomic_inc_unless_negative)  !! 1292         int c = arch_atomic_read(v);
2496         return arch_atomic_inc_unless_negativ << 
2497 #else                                         << 
2498         int c = raw_atomic_read(v);           << 
2499                                                  1293 
2500         do {                                     1294         do {
2501                 if (unlikely(c < 0))             1295                 if (unlikely(c < 0))
2502                         return false;            1296                         return false;
2503         } while (!raw_atomic_try_cmpxchg(v, &c, c + 1)); !! 1297         } while (!arch_atomic_try_cmpxchg(v, &c, c + 1));
2504                                                  1298 
2505         return true;                             1299         return true;
2506 #endif                                        << 
2507 }                                                1300 }
                                                   >> 1301 #define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
                                                   >> 1302 #endif
2508                                                  1303 
2509 /**                                           !! 1304 #ifndef arch_atomic_dec_unless_positive
2510  * raw_atomic_dec_unless_positive() - atomic  << 
2511  * @v: pointer to atomic_t                    << 
2512  *                                            << 
2513  * If (@v <= 0), atomically updates @v to (@v << 
2514  * Otherwise, @v is not modified and relaxed  << 
2515  *                                            << 
2516  * Safe to use in noinstr code; prefer atomic << 
2517  *                                            << 
2518  * Return: @true if @v was updated, @false ot << 
2519  */                                           << 
2520 static __always_inline bool                      1305 static __always_inline bool
2521 raw_atomic_dec_unless_positive(atomic_t *v)   !! 1306 arch_atomic_dec_unless_positive(atomic_t *v)
2522 {                                                1307 {
2523 #if defined(arch_atomic_dec_unless_positive)  !! 1308         int c = arch_atomic_read(v);
2524         return arch_atomic_dec_unless_positiv << 
2525 #else                                         << 
2526         int c = raw_atomic_read(v);           << 
2527                                                  1309 
2528         do {                                     1310         do {
2529                 if (unlikely(c > 0))             1311                 if (unlikely(c > 0))
2530                         return false;            1312                         return false;
2531         } while (!raw_atomic_try_cmpxchg(v, &c, c - 1)); !! 1313         } while (!arch_atomic_try_cmpxchg(v, &c, c - 1));
2532                                                  1314 
2533         return true;                             1315         return true;
2534 #endif                                        << 
2535 }                                                1316 }
                                                   >> 1317 #define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
                                                   >> 1318 #endif
2536                                                  1319 
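/*
 * Editor's illustrative sketch (the my_gate_*() helpers are hypothetical):
 * inc_unless_negative() and dec_unless_positive() can implement a small
 * gate where a negative count means "closed", e.g. readers only enter
 * while the count is non-negative.
 */
static inline bool my_gate_enter(atomic_t *gate)
{
        return raw_atomic_inc_unless_negative(gate);    /* fails while closed */
}

static inline void my_gate_exit(atomic_t *gate)
{
        raw_atomic_dec(gate);
}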
2537 /**                                           !! 1320 #ifndef arch_atomic_dec_if_positive
2538  * raw_atomic_dec_if_positive() - atomic decr << 
2539  * @v: pointer to atomic_t                    << 
2540  *                                            << 
2541  * If (@v > 0), atomically updates @v to (@v  << 
2542  * Otherwise, @v is not modified and relaxed  << 
2543  *                                            << 
2544  * Safe to use in noinstr code; prefer atomic << 
2545  *                                            << 
2546  * Return: The old value of (@v - 1), regardl << 
2547  */                                           << 
2548 static __always_inline int                       1321 static __always_inline int
2549 raw_atomic_dec_if_positive(atomic_t *v)       !! 1322 arch_atomic_dec_if_positive(atomic_t *v)
2550 {                                                1323 {
2551 #if defined(arch_atomic_dec_if_positive)      !! 1324         int dec, c = arch_atomic_read(v);
2552         return arch_atomic_dec_if_positive(v) << 
2553 #else                                         << 
2554         int dec, c = raw_atomic_read(v);      << 
2555                                                  1325 
2556         do {                                     1326         do {
2557                 dec = c - 1;                     1327                 dec = c - 1;
2558                 if (unlikely(dec < 0))           1328                 if (unlikely(dec < 0))
2559                         break;                   1329                         break;
2560         } while (!raw_atomic_try_cmpxchg(v, &c, dec)); !! 1330         } while (!arch_atomic_try_cmpxchg(v, &c, dec));
2561                                                  1331 
2562         return dec;                              1332         return dec;
2563 #endif                                        << 
2564 }                                                1333 }
                                                   >> 1334 #define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
                                                   >> 1335 #endif
2565                                                  1336 
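/*
 * Editor's illustrative sketch (my_try_down() is hypothetical):
 * dec_if_positive() returns the decremented value even when it declines to
 * store it, so a negative return means no "token" was available and the
 * counter was left untouched.
 */
static inline bool my_try_down(atomic_t *sem)
{
        return raw_atomic_dec_if_positive(sem) >= 0;
}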
2566 #ifdef CONFIG_GENERIC_ATOMIC64                   1337 #ifdef CONFIG_GENERIC_ATOMIC64
2567 #include <asm-generic/atomic64.h>                1338 #include <asm-generic/atomic64.h>
2568 #endif                                           1339 #endif
2569                                                  1340 
2570 /**                                           !! 1341 #ifndef arch_atomic64_read_acquire
2571  * raw_atomic64_read() - atomic load with rel << 
2572  * @v: pointer to atomic64_t                  << 
2573  *                                            << 
2574  * Atomically loads the value of @v with rela << 
2575  *                                            << 
2576  * Safe to use in noinstr code; prefer atomic << 
2577  *                                            << 
2578  * Return: The value loaded from @v.          << 
2579  */                                           << 
2580 static __always_inline s64                       1342 static __always_inline s64
2581 raw_atomic64_read(const atomic64_t *v)        !! 1343 arch_atomic64_read_acquire(const atomic64_t *v)
2582 {                                                1344 {
2583         return arch_atomic64_read(v);         << 
2584 }                                             << 
2585                                               << 
2586 /**                                           << 
2587  * raw_atomic64_read_acquire() - atomic load  << 
2588  * @v: pointer to atomic64_t                  << 
2589  *                                            << 
2590  * Atomically loads the value of @v with acqu << 
2591  *                                            << 
2592  * Safe to use in noinstr code; prefer atomic << 
2593  *                                            << 
2594  * Return: The value loaded from @v.          << 
2595  */                                           << 
2596 static __always_inline s64                    << 
2597 raw_atomic64_read_acquire(const atomic64_t *v << 
2598 {                                             << 
2599 #if defined(arch_atomic64_read_acquire)       << 
2600         return arch_atomic64_read_acquire(v); << 
2601 #else                                         << 
2602         s64 ret;                                 1345         s64 ret;
2603                                                  1346 
2604         if (__native_word(atomic64_t)) {         1347         if (__native_word(atomic64_t)) {
2605                 ret = smp_load_acquire(&(v)->    1348                 ret = smp_load_acquire(&(v)->counter);
2606         } else {                                 1349         } else {
2607                 ret = raw_atomic64_read(v);   !! 1350                 ret = arch_atomic64_read(v);
2608                 __atomic_acquire_fence();        1351                 __atomic_acquire_fence();
2609         }                                        1352         }
2610                                                  1353 
2611         return ret;                              1354         return ret;
2612 #endif                                        << 
2613 }                                             << 
2614                                               << 
2615 /**                                           << 
2616  * raw_atomic64_set() - atomic set with relax << 
2617  * @v: pointer to atomic64_t                  << 
2618  * @i: s64 value to assign                    << 
2619  *                                            << 
2620  * Atomically sets @v to @i with relaxed orde << 
2621  *                                            << 
2622  * Safe to use in noinstr code; prefer atomic << 
2623  *                                            << 
2624  * Return: Nothing.                           << 
2625  */                                           << 
2626 static __always_inline void                   << 
2627 raw_atomic64_set(atomic64_t *v, s64 i)        << 
2628 {                                             << 
2629         arch_atomic64_set(v, i);              << 
2630 }                                                1355 }
                                                   >> 1356 #define arch_atomic64_read_acquire arch_atomic64_read_acquire
                                                   >> 1357 #endif
2631                                                  1358 
2632 /**                                           !! 1359 #ifndef arch_atomic64_set_release
2633  * raw_atomic64_set_release() - atomic set wi << 
2634  * @v: pointer to atomic64_t                  << 
2635  * @i: s64 value to assign                    << 
2636  *                                            << 
2637  * Atomically sets @v to @i with release orde << 
2638  *                                            << 
2639  * Safe to use in noinstr code; prefer atomic << 
2640  *                                            << 
2641  * Return: Nothing.                           << 
2642  */                                           << 
2643 static __always_inline void                      1360 static __always_inline void
2644 raw_atomic64_set_release(atomic64_t *v, s64 i !! 1361 arch_atomic64_set_release(atomic64_t *v, s64 i)
2645 {                                                1362 {
2646 #if defined(arch_atomic64_set_release)        << 
2647         arch_atomic64_set_release(v, i);      << 
2648 #else                                         << 
2649         if (__native_word(atomic64_t)) {         1363         if (__native_word(atomic64_t)) {
2650                 smp_store_release(&(v)->count    1364                 smp_store_release(&(v)->counter, i);
2651         } else {                                 1365         } else {
2652                 __atomic_release_fence();        1366                 __atomic_release_fence();
2653                 raw_atomic64_set(v, i);       !! 1367                 arch_atomic64_set(v, i);
2654         }                                        1368         }
                                                   >> 1369 }
                                                   >> 1370 #define arch_atomic64_set_release arch_atomic64_set_release
2655 #endif                                           1371 #endif
                                                   >> 1372 
                                                   >> 1373 #ifndef arch_atomic64_add_return_relaxed
                                                   >> 1374 #define arch_atomic64_add_return_acquire arch_atomic64_add_return
                                                   >> 1375 #define arch_atomic64_add_return_release arch_atomic64_add_return
                                                   >> 1376 #define arch_atomic64_add_return_relaxed arch_atomic64_add_return
                                                   >> 1377 #else /* arch_atomic64_add_return_relaxed */
                                                   >> 1378 
                                                   >> 1379 #ifndef arch_atomic64_add_return_acquire
                                                   >> 1380 static __always_inline s64
                                                   >> 1381 arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
                                                   >> 1382 {
                                                   >> 1383         s64 ret = arch_atomic64_add_return_relaxed(i, v);
                                                   >> 1384         __atomic_acquire_fence();
                                                   >> 1385         return ret;
2656 }                                                1386 }
                                                   >> 1387 #define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
                                                   >> 1388 #endif
2657                                                  1389 
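/*
 * Editor's illustrative sketch (my_data/my_ready and the helpers are
 * hypothetical): a release store of a flag pairs with an acquire load of
 * the same flag, so a consumer that observes my_ready == 1 also observes
 * the producer's earlier write to my_data.
 */
static atomic64_t my_data;
static atomic64_t my_ready;

static void my_publish(s64 val)
{
        raw_atomic64_set(&my_data, val);
        raw_atomic64_set_release(&my_ready, 1);         /* data before flag */
}

static bool my_consume(s64 *val)
{
        if (!raw_atomic64_read_acquire(&my_ready))
                return false;
        *val = raw_atomic64_read(&my_data);             /* safe after acquire */
        return true;
}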
2658 /**                                           !! 1390 #ifndef arch_atomic64_add_return_release
2659  * raw_atomic64_add() - atomic add with relax !! 1391 static __always_inline s64
2660  * @i: s64 value to add                       !! 1392 arch_atomic64_add_return_release(s64 i, atomic64_t *v)
2661  * @v: pointer to atomic64_t                  << 
2662  *                                            << 
2663  * Atomically updates @v to (@v + @i) with re << 
2664  *                                            << 
2665  * Safe to use in noinstr code; prefer atomic << 
2666  *                                            << 
2667  * Return: Nothing.                           << 
2668  */                                           << 
2669 static __always_inline void                   << 
2670 raw_atomic64_add(s64 i, atomic64_t *v)        << 
2671 {                                                1393 {
2672         arch_atomic64_add(i, v);              !! 1394         __atomic_release_fence();
                                                   >> 1395         return arch_atomic64_add_return_relaxed(i, v);
2673 }                                                1396 }
                                                   >> 1397 #define arch_atomic64_add_return_release arch_atomic64_add_return_release
                                                   >> 1398 #endif
2674                                                  1399 
2675 /**                                           !! 1400 #ifndef arch_atomic64_add_return
2676  * raw_atomic64_add_return() - atomic add wit << 
2677  * @i: s64 value to add                       << 
2678  * @v: pointer to atomic64_t                  << 
2679  *                                            << 
2680  * Atomically updates @v to (@v + @i) with fu << 
2681  *                                            << 
2682  * Safe to use in noinstr code; prefer atomic << 
2683  *                                            << 
2684  * Return: The updated value of @v.           << 
2685  */                                           << 
2686 static __always_inline s64                       1401 static __always_inline s64
2687 raw_atomic64_add_return(s64 i, atomic64_t *v) !! 1402 arch_atomic64_add_return(s64 i, atomic64_t *v)
2688 {                                                1403 {
2689 #if defined(arch_atomic64_add_return)         << 
2690         return arch_atomic64_add_return(i, v) << 
2691 #elif defined(arch_atomic64_add_return_relaxe << 
2692         s64 ret;                                 1404         s64 ret;
2693         __atomic_pre_full_fence();               1405         __atomic_pre_full_fence();
2694         ret = arch_atomic64_add_return_relaxe    1406         ret = arch_atomic64_add_return_relaxed(i, v);
2695         __atomic_post_full_fence();              1407         __atomic_post_full_fence();
2696         return ret;                              1408         return ret;
2697 #else                                         << 
2698 #error "Unable to define raw_atomic64_add_ret << 
2699 #endif                                        << 
2700 }                                                1409 }
                                                   >> 1410 #define arch_atomic64_add_return arch_atomic64_add_return
                                                   >> 1411 #endif
2701                                                  1412 
2702 /**                                           !! 1413 #endif /* arch_atomic64_add_return_relaxed */
2703  * raw_atomic64_add_return_acquire() - atomic !! 1414 
2704  * @i: s64 value to add                       !! 1415 #ifndef arch_atomic64_fetch_add_relaxed
2705  * @v: pointer to atomic64_t                  !! 1416 #define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
2706  *                                            !! 1417 #define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
2707  * Atomically updates @v to (@v + @i) with ac !! 1418 #define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add
2708  *                                            !! 1419 #else /* arch_atomic64_fetch_add_relaxed */
2709  * Safe to use in noinstr code; prefer atomic !! 1420 
2710  *                                            !! 1421 #ifndef arch_atomic64_fetch_add_acquire
2711  * Return: The updated value of @v.           << 
2712  */                                           << 
2713 static __always_inline s64                       1422 static __always_inline s64
2714 raw_atomic64_add_return_acquire(s64 i, atomic !! 1423 arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
2715 {                                                1424 {
2716 #if defined(arch_atomic64_add_return_acquire) !! 1425         s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
2717         return arch_atomic64_add_return_acqui << 
2718 #elif defined(arch_atomic64_add_return_relaxe << 
2719         s64 ret = arch_atomic64_add_return_re << 
2720         __atomic_acquire_fence();                1426         __atomic_acquire_fence();
2721         return ret;                              1427         return ret;
2722 #elif defined(arch_atomic64_add_return)       << 
2723         return arch_atomic64_add_return(i, v) << 
2724 #else                                         << 
2725 #error "Unable to define raw_atomic64_add_ret << 
2726 #endif                                        << 
2727 }                                                1428 }
                                                   >> 1429 #define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
                                                   >> 1430 #endif
2728                                                  1431 
2729 /**                                           !! 1432 #ifndef arch_atomic64_fetch_add_release
2730  * raw_atomic64_add_return_release() - atomic << 
2731  * @i: s64 value to add                       << 
2732  * @v: pointer to atomic64_t                  << 
2733  *                                            << 
2734  * Atomically updates @v to (@v + @i) with re << 
2735  *                                            << 
2736  * Safe to use in noinstr code; prefer atomic << 
2737  *                                            << 
2738  * Return: The updated value of @v.           << 
2739  */                                           << 
2740 static __always_inline s64                       1433 static __always_inline s64
2741 raw_atomic64_add_return_release(s64 i, atomic !! 1434 arch_atomic64_fetch_add_release(s64 i, atomic64_t *v)
2742 {                                                1435 {
2743 #if defined(arch_atomic64_add_return_release) << 
2744         return arch_atomic64_add_return_relea << 
2745 #elif defined(arch_atomic64_add_return_relaxe << 
2746         __atomic_release_fence();                1436         __atomic_release_fence();
2747         return arch_atomic64_add_return_relax !! 1437         return arch_atomic64_fetch_add_relaxed(i, v);
2748 #elif defined(arch_atomic64_add_return)       << 
2749         return arch_atomic64_add_return(i, v) << 
2750 #else                                         << 
2751 #error "Unable to define raw_atomic64_add_ret << 
2752 #endif                                        << 
2753 }                                                1438 }
2754                                               !! 1439 #define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
2755 /**                                           << 
2756  * raw_atomic64_add_return_relaxed() - atomic << 
2757  * @i: s64 value to add                       << 
2758  * @v: pointer to atomic64_t                  << 
2759  *                                            << 
2760  * Atomically updates @v to (@v + @i) with re << 
2761  *                                            << 
2762  * Safe to use in noinstr code; prefer atomic << 
2763  *                                            << 
2764  * Return: The updated value of @v.           << 
2765  */                                           << 
2766 static __always_inline s64                    << 
2767 raw_atomic64_add_return_relaxed(s64 i, atomic << 
2768 {                                             << 
2769 #if defined(arch_atomic64_add_return_relaxed) << 
2770         return arch_atomic64_add_return_relax << 
2771 #elif defined(arch_atomic64_add_return)       << 
2772         return arch_atomic64_add_return(i, v) << 
2773 #else                                         << 
2774 #error "Unable to define raw_atomic64_add_ret << 
2775 #endif                                           1440 #endif
2776 }                                             << 
2777                                                  1441 
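/*
 * Editor's illustrative sketch of the composition pattern used throughout
 * this file, assuming the architecture provides
 * arch_atomic64_add_return_relaxed(): a fully ordered operation is built
 * by bracketing the relaxed primitive with the generic full-fence helpers
 * (my_add_return_full() is a hypothetical name).
 */
static __always_inline s64 my_add_return_full(s64 i, atomic64_t *v)
{
        s64 ret;

        __atomic_pre_full_fence();                      /* upgrade ordering */
        ret = arch_atomic64_add_return_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
}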
2778 /**                                           !! 1442 #ifndef arch_atomic64_fetch_add
2779  * raw_atomic64_fetch_add() - atomic add with << 
2780  * @i: s64 value to add                       << 
2781  * @v: pointer to atomic64_t                  << 
2782  *                                            << 
2783  * Atomically updates @v to (@v + @i) with fu << 
2784  *                                            << 
2785  * Safe to use in noinstr code; prefer atomic << 
2786  *                                            << 
2787  * Return: The original value of @v.          << 
2788  */                                           << 
2789 static __always_inline s64                       1443 static __always_inline s64
2790 raw_atomic64_fetch_add(s64 i, atomic64_t *v)  !! 1444 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
2791 {                                                1445 {
2792 #if defined(arch_atomic64_fetch_add)          << 
2793         return arch_atomic64_fetch_add(i, v); << 
2794 #elif defined(arch_atomic64_fetch_add_relaxed << 
2795         s64 ret;                                 1446         s64 ret;
2796         __atomic_pre_full_fence();               1447         __atomic_pre_full_fence();
2797         ret = arch_atomic64_fetch_add_relaxed    1448         ret = arch_atomic64_fetch_add_relaxed(i, v);
2798         __atomic_post_full_fence();              1449         __atomic_post_full_fence();
2799         return ret;                              1450         return ret;
2800 #else                                         << 
2801 #error "Unable to define raw_atomic64_fetch_a << 
2802 #endif                                        << 
2803 }                                                1451 }
                                                   >> 1452 #define arch_atomic64_fetch_add arch_atomic64_fetch_add
                                                   >> 1453 #endif
2804                                                  1454 
2805 /**                                           !! 1455 #endif /* arch_atomic64_fetch_add_relaxed */
2806  * raw_atomic64_fetch_add_acquire() - atomic  !! 1456 
2807  * @i: s64 value to add                       !! 1457 #ifndef arch_atomic64_sub_return_relaxed
2808  * @v: pointer to atomic64_t                  !! 1458 #define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
2809  *                                            !! 1459 #define arch_atomic64_sub_return_release arch_atomic64_sub_return
2810  * Atomically updates @v to (@v + @i) with ac !! 1460 #define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return
2811  *                                            !! 1461 #else /* arch_atomic64_sub_return_relaxed */
2812  * Safe to use in noinstr code; prefer atomic !! 1462 
2813  *                                            !! 1463 #ifndef arch_atomic64_sub_return_acquire
2814  * Return: The original value of @v.          << 
2815  */                                           << 
2816 static __always_inline s64                       1464 static __always_inline s64
2817 raw_atomic64_fetch_add_acquire(s64 i, atomic6 !! 1465 arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
2818 {                                                1466 {
2819 #if defined(arch_atomic64_fetch_add_acquire)  !! 1467         s64 ret = arch_atomic64_sub_return_relaxed(i, v);
2820         return arch_atomic64_fetch_add_acquir << 
2821 #elif defined(arch_atomic64_fetch_add_relaxed << 
2822         s64 ret = arch_atomic64_fetch_add_rel << 
2823         __atomic_acquire_fence();                1468         __atomic_acquire_fence();
2824         return ret;                              1469         return ret;
2825 #elif defined(arch_atomic64_fetch_add)        << 
2826         return arch_atomic64_fetch_add(i, v); << 
2827 #else                                         << 
2828 #error "Unable to define raw_atomic64_fetch_a << 
2829 #endif                                        << 
2830 }                                                1470 }
                                                   >> 1471 #define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
                                                   >> 1472 #endif
2831                                                  1473 
2832 /**                                           !! 1474 #ifndef arch_atomic64_sub_return_release
2833  * raw_atomic64_fetch_add_release() - atomic  << 
2834  * @i: s64 value to add                       << 
2835  * @v: pointer to atomic64_t                  << 
2836  *                                            << 
2837  * Atomically updates @v to (@v + @i) with re << 
2838  *                                            << 
2839  * Safe to use in noinstr code; prefer atomic << 
2840  *                                            << 
2841  * Return: The original value of @v.          << 
2842  */                                           << 
2843 static __always_inline s64                       1475 static __always_inline s64
2844 raw_atomic64_fetch_add_release(s64 i, atomic6 !! 1476 arch_atomic64_sub_return_release(s64 i, atomic64_t *v)
2845 {                                                1477 {
2846 #if defined(arch_atomic64_fetch_add_release)  << 
2847         return arch_atomic64_fetch_add_releas << 
2848 #elif defined(arch_atomic64_fetch_add_relaxed << 
2849         __atomic_release_fence();                1478         __atomic_release_fence();
2850         return arch_atomic64_fetch_add_relaxe !! 1479         return arch_atomic64_sub_return_relaxed(i, v);
2851 #elif defined(arch_atomic64_fetch_add)        << 
2852         return arch_atomic64_fetch_add(i, v); << 
2853 #else                                         << 
2854 #error "Unable to define raw_atomic64_fetch_a << 
2855 #endif                                        << 
2856 }                                                1480 }
2857                                               !! 1481 #define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
2858 /**                                           << 
2859  * raw_atomic64_fetch_add_relaxed() - atomic  << 
2860  * @i: s64 value to add                       << 
2861  * @v: pointer to atomic64_t                  << 
2862  *                                            << 
2863  * Atomically updates @v to (@v + @i) with re << 
2864  *                                            << 
2865  * Safe to use in noinstr code; prefer atomic << 
2866  *                                            << 
2867  * Return: The original value of @v.          << 
2868  */                                           << 
2869 static __always_inline s64                    << 
2870 raw_atomic64_fetch_add_relaxed(s64 i, atomic6 << 
2871 {                                             << 
2872 #if defined(arch_atomic64_fetch_add_relaxed)  << 
2873         return arch_atomic64_fetch_add_relaxe << 
2874 #elif defined(arch_atomic64_fetch_add)        << 
2875         return arch_atomic64_fetch_add(i, v); << 
2876 #else                                         << 
2877 #error "Unable to define raw_atomic64_fetch_a << 
2878 #endif                                           1482 #endif
2879 }                                             << 
2880                                               << 
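/*
 * Editor's illustrative sketch (my_next_seq() is hypothetical): fetch_add()
 * returns the value *before* the addition, which is what a sequence or
 * ticket allocator wants, whereas add_return() above hands back the new
 * value.
 */
static inline s64 my_next_seq(atomic64_t *seq)
{
        return raw_atomic64_fetch_add(1, seq);
}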
2881 /**                                           << 
2882  * raw_atomic64_sub() - atomic subtract with  << 
2883  * @i: s64 value to subtract                  << 
2884  * @v: pointer to atomic64_t                  << 
2885  *                                            << 
2886  * Atomically updates @v to (@v - @i) with re << 
2887  *                                            << 
2888  * Safe to use in noinstr code; prefer atomic << 
2889  *                                            << 
2890  * Return: Nothing.                           << 
2891  */                                           << 
2892 static __always_inline void                   << 
2893 raw_atomic64_sub(s64 i, atomic64_t *v)        << 
2894 {                                             << 
2895         arch_atomic64_sub(i, v);              << 
2896 }                                             << 
2897                                                  1483 
2898 /**                                           !! 1484 #ifndef arch_atomic64_sub_return
2899  * raw_atomic64_sub_return() - atomic subtrac << 
2900  * @i: s64 value to subtract                  << 
2901  * @v: pointer to atomic64_t                  << 
2902  *                                            << 
2903  * Atomically updates @v to (@v - @i) with fu << 
2904  *                                            << 
2905  * Safe to use in noinstr code; prefer atomic << 
2906  *                                            << 
2907  * Return: The updated value of @v.           << 
2908  */                                           << 
2909 static __always_inline s64                       1485 static __always_inline s64
2910 raw_atomic64_sub_return(s64 i, atomic64_t *v) !! 1486 arch_atomic64_sub_return(s64 i, atomic64_t *v)
2911 {                                                1487 {
2912 #if defined(arch_atomic64_sub_return)         << 
2913         return arch_atomic64_sub_return(i, v) << 
2914 #elif defined(arch_atomic64_sub_return_relaxe << 
2915         s64 ret;                                 1488         s64 ret;
2916         __atomic_pre_full_fence();               1489         __atomic_pre_full_fence();
2917         ret = arch_atomic64_sub_return_relaxe    1490         ret = arch_atomic64_sub_return_relaxed(i, v);
2918         __atomic_post_full_fence();              1491         __atomic_post_full_fence();
2919         return ret;                              1492         return ret;
2920 #else                                         << 
2921 #error "Unable to define raw_atomic64_sub_ret << 
2922 #endif                                        << 
2923 }                                                1493 }
                                                   >> 1494 #define arch_atomic64_sub_return arch_atomic64_sub_return
                                                   >> 1495 #endif
2924                                                  1496 
2925 /**                                           !! 1497 #endif /* arch_atomic64_sub_return_relaxed */
2926  * raw_atomic64_sub_return_acquire() - atomic !! 1498 
2927  * @i: s64 value to subtract                  !! 1499 #ifndef arch_atomic64_fetch_sub_relaxed
2928  * @v: pointer to atomic64_t                  !! 1500 #define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
2929  *                                            !! 1501 #define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
2930  * Atomically updates @v to (@v - @i) with ac !! 1502 #define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub
2931  *                                            !! 1503 #else /* arch_atomic64_fetch_sub_relaxed */
2932  * Safe to use in noinstr code; prefer atomic !! 1504 
2933  *                                            !! 1505 #ifndef arch_atomic64_fetch_sub_acquire
2934  * Return: The updated value of @v.           << 
2935  */                                           << 
2936 static __always_inline s64                       1506 static __always_inline s64
2937 raw_atomic64_sub_return_acquire(s64 i, atomic !! 1507 arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
2938 {                                                1508 {
2939 #if defined(arch_atomic64_sub_return_acquire) !! 1509         s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
2940         return arch_atomic64_sub_return_acqui << 
2941 #elif defined(arch_atomic64_sub_return_relaxe << 
2942         s64 ret = arch_atomic64_sub_return_re << 
2943         __atomic_acquire_fence();                1510         __atomic_acquire_fence();
2944         return ret;                              1511         return ret;
2945 #elif defined(arch_atomic64_sub_return)       << 
2946         return arch_atomic64_sub_return(i, v) << 
2947 #else                                         << 
2948 #error "Unable to define raw_atomic64_sub_ret << 
2949 #endif                                        << 
2950 }                                                1512 }
                                                   >> 1513 #define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
                                                   >> 1514 #endif
2951                                                  1515 
2952 /**                                           !! 1516 #ifndef arch_atomic64_fetch_sub_release
2953  * raw_atomic64_sub_return_release() - atomic subtract with release ordering <<
2954  * @i: s64 value to subtract                  << 
2955  * @v: pointer to atomic64_t                  << 
2956  *                                            << 
2957  * Atomically updates @v to (@v - @i) with re << 
2958  *                                            << 
2959  * Safe to use in noinstr code; prefer atomic << 
2960  *                                            << 
2961  * Return: The updated value of @v.           << 
2962  */                                           << 
2963 static __always_inline s64                       1517 static __always_inline s64
2964 raw_atomic64_sub_return_release(s64 i, atomic !! 1518 arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
2965 {                                                1519 {
2966 #if defined(arch_atomic64_sub_return_release) << 
2967         return arch_atomic64_sub_return_relea << 
2968 #elif defined(arch_atomic64_sub_return_relaxe << 
2969         __atomic_release_fence();                1520         __atomic_release_fence();
2970         return arch_atomic64_sub_return_relax !! 1521         return arch_atomic64_fetch_sub_relaxed(i, v);
2971 #elif defined(arch_atomic64_sub_return)       << 
2972         return arch_atomic64_sub_return(i, v) << 
2973 #else                                         << 
2974 #error "Unable to define raw_atomic64_sub_ret << 
2975 #endif                                        << 
2976 }                                                1522 }
2977                                               !! 1523 #define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
2978 /**                                           << 
2979  * raw_atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering <<
2980  * @i: s64 value to subtract                  << 
2981  * @v: pointer to atomic64_t                  << 
2982  *                                            << 
2983  * Atomically updates @v to (@v - @i) with re << 
2984  *                                            << 
2985  * Safe to use in noinstr code; prefer atomic << 
2986  *                                            << 
2987  * Return: The updated value of @v.           << 
2988  */                                           << 
2989 static __always_inline s64                    << 
2990 raw_atomic64_sub_return_relaxed(s64 i, atomic << 
2991 {                                             << 
2992 #if defined(arch_atomic64_sub_return_relaxed) << 
2993         return arch_atomic64_sub_return_relax << 
2994 #elif defined(arch_atomic64_sub_return)       << 
2995         return arch_atomic64_sub_return(i, v) << 
2996 #else                                         << 
2997 #error "Unable to define raw_atomic64_sub_ret << 
2998 #endif                                           1524 #endif
2999 }                                             << 
3000                                                  1525 
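Ordinary kernel code is expected to call the instrumented atomic64_sub_return*() wrappers rather than the raw_ forms generated above, which exist for noinstr contexts. A minimal usage sketch, assuming <linux/atomic.h> and an invented counter name:

static atomic64_t demo_bytes_inflight = ATOMIC64_INIT(0);

/* Full ordering: the update is ordered against all surrounding accesses. */
static inline s64 demo_complete(s64 nr)
{
	return atomic64_sub_return(nr, &demo_bytes_inflight);
}

/* Release ordering: earlier stores are visible before the new counter value. */
static inline s64 demo_complete_release(s64 nr)
{
	return atomic64_sub_return_release(nr, &demo_bytes_inflight);
}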
3001 /**                                           !! 1526 #ifndef arch_atomic64_fetch_sub
3002  * raw_atomic64_fetch_sub() - atomic subtract with full ordering <<
3003  * @i: s64 value to subtract                  << 
3004  * @v: pointer to atomic64_t                  << 
3005  *                                            << 
3006  * Atomically updates @v to (@v - @i) with fu << 
3007  *                                            << 
3008  * Safe to use in noinstr code; prefer atomic << 
3009  *                                            << 
3010  * Return: The original value of @v.          << 
3011  */                                           << 
3012 static __always_inline s64                       1527 static __always_inline s64
3013 raw_atomic64_fetch_sub(s64 i, atomic64_t *v)  !! 1528 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
3014 {                                                1529 {
3015 #if defined(arch_atomic64_fetch_sub)          << 
3016         return arch_atomic64_fetch_sub(i, v); << 
3017 #elif defined(arch_atomic64_fetch_sub_relaxed << 
3018         s64 ret;                                 1530         s64 ret;
3019         __atomic_pre_full_fence();               1531         __atomic_pre_full_fence();
3020         ret = arch_atomic64_fetch_sub_relaxed    1532         ret = arch_atomic64_fetch_sub_relaxed(i, v);
3021         __atomic_post_full_fence();              1533         __atomic_post_full_fence();
3022         return ret;                              1534         return ret;
3023 #else                                         !! 1535 }
3024 #error "Unable to define raw_atomic64_fetch_s !! 1536 #define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
3025 #endif                                           1537 #endif
                                                   >> 1538 
                                                   >> 1539 #endif /* arch_atomic64_fetch_sub_relaxed */
                                                   >> 1540 
                                                   >> 1541 #ifndef arch_atomic64_inc
                                                   >> 1542 static __always_inline void
                                                   >> 1543 arch_atomic64_inc(atomic64_t *v)
                                                   >> 1544 {
                                                   >> 1545         arch_atomic64_add(1, v);
3026 }                                                1546 }
                                                   >> 1547 #define arch_atomic64_inc arch_atomic64_inc
                                                   >> 1548 #endif
3027                                                  1549 
3028 /**                                           !! 1550 #ifndef arch_atomic64_inc_return_relaxed
3029  * raw_atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering !! 1551 #ifdef arch_atomic64_inc_return
3030  * @i: s64 value to subtract                  !! 1552 #define arch_atomic64_inc_return_acquire arch_atomic64_inc_return
3031  * @v: pointer to atomic64_t                  !! 1553 #define arch_atomic64_inc_return_release arch_atomic64_inc_return
3032  *                                            !! 1554 #define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return
3033  * Atomically updates @v to (@v - @i) with ac !! 1555 #endif /* arch_atomic64_inc_return */
3034  *                                            !! 1556 
3035  * Safe to use in noinstr code; prefer atomic !! 1557 #ifndef arch_atomic64_inc_return
3036  *                                            << 
3037  * Return: The original value of @v.          << 
3038  */                                           << 
3039 static __always_inline s64                       1558 static __always_inline s64
3040 raw_atomic64_fetch_sub_acquire(s64 i, atomic6 !! 1559 arch_atomic64_inc_return(atomic64_t *v)
3041 {                                                1560 {
3042 #if defined(arch_atomic64_fetch_sub_acquire)  !! 1561         return arch_atomic64_add_return(1, v);
3043         return arch_atomic64_fetch_sub_acquir << 
3044 #elif defined(arch_atomic64_fetch_sub_relaxed << 
3045         s64 ret = arch_atomic64_fetch_sub_rel << 
3046         __atomic_acquire_fence();             << 
3047         return ret;                           << 
3048 #elif defined(arch_atomic64_fetch_sub)        << 
3049         return arch_atomic64_fetch_sub(i, v); << 
3050 #else                                         << 
3051 #error "Unable to define raw_atomic64_fetch_s << 
3052 #endif                                        << 
3053 }                                                1562 }
                                                   >> 1563 #define arch_atomic64_inc_return arch_atomic64_inc_return
                                                   >> 1564 #endif
3054                                                  1565 
3055 /**                                           !! 1566 #ifndef arch_atomic64_inc_return_acquire
3056  * raw_atomic64_fetch_sub_release() - atomic subtract with release ordering <<
3057  * @i: s64 value to subtract                  << 
3058  * @v: pointer to atomic64_t                  << 
3059  *                                            << 
3060  * Atomically updates @v to (@v - @i) with re << 
3061  *                                            << 
3062  * Safe to use in noinstr code; prefer atomic << 
3063  *                                            << 
3064  * Return: The original value of @v.          << 
3065  */                                           << 
3066 static __always_inline s64                       1567 static __always_inline s64
3067 raw_atomic64_fetch_sub_release(s64 i, atomic6 !! 1568 arch_atomic64_inc_return_acquire(atomic64_t *v)
3068 {                                                1569 {
3069 #if defined(arch_atomic64_fetch_sub_release)  !! 1570         return arch_atomic64_add_return_acquire(1, v);
3070         return arch_atomic64_fetch_sub_releas << 
3071 #elif defined(arch_atomic64_fetch_sub_relaxed << 
3072         __atomic_release_fence();             << 
3073         return arch_atomic64_fetch_sub_relaxe << 
3074 #elif defined(arch_atomic64_fetch_sub)        << 
3075         return arch_atomic64_fetch_sub(i, v); << 
3076 #else                                         << 
3077 #error "Unable to define raw_atomic64_fetch_s << 
3078 #endif                                        << 
3079 }                                                1571 }
                                                   >> 1572 #define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
                                                   >> 1573 #endif
3080                                                  1574 
3081 /**                                           !! 1575 #ifndef arch_atomic64_inc_return_release
3082  * raw_atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering <<
3083  * @i: s64 value to subtract                  << 
3084  * @v: pointer to atomic64_t                  << 
3085  *                                            << 
3086  * Atomically updates @v to (@v - @i) with re << 
3087  *                                            << 
3088  * Safe to use in noinstr code; prefer atomic << 
3089  *                                            << 
3090  * Return: The original value of @v.          << 
3091  */                                           << 
3092 static __always_inline s64                       1576 static __always_inline s64
3093 raw_atomic64_fetch_sub_relaxed(s64 i, atomic6 !! 1577 arch_atomic64_inc_return_release(atomic64_t *v)
3094 {                                                1578 {
3095 #if defined(arch_atomic64_fetch_sub_relaxed)  !! 1579         return arch_atomic64_add_return_release(1, v);
3096         return arch_atomic64_fetch_sub_relaxe !! 1580 }
3097 #elif defined(arch_atomic64_fetch_sub)        !! 1581 #define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
3098         return arch_atomic64_fetch_sub(i, v); << 
3099 #else                                         << 
3100 #error "Unable to define raw_atomic64_fetch_s << 
3101 #endif                                           1582 #endif
                                                   >> 1583 
                                                   >> 1584 #ifndef arch_atomic64_inc_return_relaxed
                                                   >> 1585 static __always_inline s64
                                                   >> 1586 arch_atomic64_inc_return_relaxed(atomic64_t *v)
                                                   >> 1587 {
                                                   >> 1588         return arch_atomic64_add_return_relaxed(1, v);
3102 }                                                1589 }
                                                   >> 1590 #define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
                                                   >> 1591 #endif
3103                                                  1592 
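Unlike sub_return above, the fetch_sub variants hand back the value @v held before the subtraction. A small sketch under the same assumptions (instrumented wrapper, invented counter):

static atomic64_t demo_outstanding = ATOMIC64_INIT(0);

/* Returns true when this call released the last outstanding unit. */
static inline bool demo_put_units(s64 nr)
{
	/* fetch_sub returns the pre-subtraction value, so old == nr means we hit zero. */
	return atomic64_fetch_sub(nr, &demo_outstanding) == nr;
}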
3104 /**                                           !! 1593 #else /* arch_atomic64_inc_return_relaxed */
3105  * raw_atomic64_inc() - atomic increment with relaxed ordering !! 1594
3106  * @v: pointer to atomic64_t                  !! 1595 #ifndef arch_atomic64_inc_return_acquire
3107  *                                            !! 1596 static __always_inline s64
3108  * Atomically updates @v to (@v + 1) with rel !! 1597 arch_atomic64_inc_return_acquire(atomic64_t *v)
3109  *                                            << 
3110  * Safe to use in noinstr code; prefer atomic << 
3111  *                                            << 
3112  * Return: Nothing.                           << 
3113  */                                           << 
3114 static __always_inline void                   << 
3115 raw_atomic64_inc(atomic64_t *v)               << 
3116 {                                                1598 {
3117 #if defined(arch_atomic64_inc)                !! 1599         s64 ret = arch_atomic64_inc_return_relaxed(v);
3118         arch_atomic64_inc(v);                 !! 1600         __atomic_acquire_fence();
3119 #else                                         !! 1601         return ret;
3120         raw_atomic64_add(1, v);               !! 1602 }
                                                   >> 1603 #define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
3121 #endif                                           1604 #endif
                                                   >> 1605 
                                                   >> 1606 #ifndef arch_atomic64_inc_return_release
                                                   >> 1607 static __always_inline s64
                                                   >> 1608 arch_atomic64_inc_return_release(atomic64_t *v)
                                                   >> 1609 {
                                                   >> 1610         __atomic_release_fence();
                                                   >> 1611         return arch_atomic64_inc_return_relaxed(v);
3122 }                                                1612 }
                                                   >> 1613 #define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
                                                   >> 1614 #endif
3123                                                  1615 
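inc_return yields the post-increment value, which makes it a natural 64-bit sequence-number generator. A hedged sketch with an invented counter:

static atomic64_t demo_seq = ATOMIC64_INIT(0);

/* Hands out 1, 2, 3, ... */
static inline s64 demo_next_seq(void)
{
	return atomic64_inc_return(&demo_seq);
}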
3124 /**                                           !! 1616 #ifndef arch_atomic64_inc_return
3125  * raw_atomic64_inc_return() - atomic increment with full ordering <<
3126  * @v: pointer to atomic64_t                  << 
3127  *                                            << 
3128  * Atomically updates @v to (@v + 1) with ful << 
3129  *                                            << 
3130  * Safe to use in noinstr code; prefer atomic << 
3131  *                                            << 
3132  * Return: The updated value of @v.           << 
3133  */                                           << 
3134 static __always_inline s64                       1617 static __always_inline s64
3135 raw_atomic64_inc_return(atomic64_t *v)        !! 1618 arch_atomic64_inc_return(atomic64_t *v)
3136 {                                                1619 {
3137 #if defined(arch_atomic64_inc_return)         << 
3138         return arch_atomic64_inc_return(v);   << 
3139 #elif defined(arch_atomic64_inc_return_relaxe << 
3140         s64 ret;                                 1620         s64 ret;
3141         __atomic_pre_full_fence();               1621         __atomic_pre_full_fence();
3142         ret = arch_atomic64_inc_return_relaxe    1622         ret = arch_atomic64_inc_return_relaxed(v);
3143         __atomic_post_full_fence();              1623         __atomic_post_full_fence();
3144         return ret;                              1624         return ret;
3145 #else                                         << 
3146         return raw_atomic64_add_return(1, v); << 
3147 #endif                                        << 
3148 }                                                1625 }
                                                   >> 1626 #define arch_atomic64_inc_return arch_atomic64_inc_return
                                                   >> 1627 #endif
3149                                                  1628 
3150 /**                                           !! 1629 #endif /* arch_atomic64_inc_return_relaxed */
3151  * raw_atomic64_inc_return_acquire() - atomic increment with acquire ordering !! 1630
3152  * @v: pointer to atomic64_t                  !! 1631 #ifndef arch_atomic64_fetch_inc_relaxed
3153  *                                            !! 1632 #ifdef arch_atomic64_fetch_inc
3154  * Atomically updates @v to (@v + 1) with acq !! 1633 #define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc
3155  *                                            !! 1634 #define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc
3156  * Safe to use in noinstr code; prefer atomic !! 1635 #define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc
3157  *                                            !! 1636 #endif /* arch_atomic64_fetch_inc */
3158  * Return: The updated value of @v.           !! 1637 
3159  */                                           !! 1638 #ifndef arch_atomic64_fetch_inc
3160 static __always_inline s64                       1639 static __always_inline s64
3161 raw_atomic64_inc_return_acquire(atomic64_t *v !! 1640 arch_atomic64_fetch_inc(atomic64_t *v)
3162 {                                                1641 {
3163 #if defined(arch_atomic64_inc_return_acquire) !! 1642         return arch_atomic64_fetch_add(1, v);
3164         return arch_atomic64_inc_return_acqui << 
3165 #elif defined(arch_atomic64_inc_return_relaxe << 
3166         s64 ret = arch_atomic64_inc_return_re << 
3167         __atomic_acquire_fence();             << 
3168         return ret;                           << 
3169 #elif defined(arch_atomic64_inc_return)       << 
3170         return arch_atomic64_inc_return(v);   << 
3171 #else                                         << 
3172         return raw_atomic64_add_return_acquir << 
3173 #endif                                        << 
3174 }                                                1643 }
                                                   >> 1644 #define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
                                                   >> 1645 #endif
3175                                                  1646 
3176 /**                                           !! 1647 #ifndef arch_atomic64_fetch_inc_acquire
3177  * raw_atomic64_inc_return_release() - atomic increment with release ordering <<
3178  * @v: pointer to atomic64_t                  << 
3179  *                                            << 
3180  * Atomically updates @v to (@v + 1) with rel << 
3181  *                                            << 
3182  * Safe to use in noinstr code; prefer atomic << 
3183  *                                            << 
3184  * Return: The updated value of @v.           << 
3185  */                                           << 
3186 static __always_inline s64                       1648 static __always_inline s64
3187 raw_atomic64_inc_return_release(atomic64_t *v !! 1649 arch_atomic64_fetch_inc_acquire(atomic64_t *v)
3188 {                                                1650 {
3189 #if defined(arch_atomic64_inc_return_release) !! 1651         return arch_atomic64_fetch_add_acquire(1, v);
3190         return arch_atomic64_inc_return_relea << 
3191 #elif defined(arch_atomic64_inc_return_relaxe << 
3192         __atomic_release_fence();             << 
3193         return arch_atomic64_inc_return_relax << 
3194 #elif defined(arch_atomic64_inc_return)       << 
3195         return arch_atomic64_inc_return(v);   << 
3196 #else                                         << 
3197         return raw_atomic64_add_return_releas << 
3198 #endif                                        << 
3199 }                                                1652 }
                                                   >> 1653 #define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
                                                   >> 1654 #endif
3200                                                  1655 
3201 /**                                           !! 1656 #ifndef arch_atomic64_fetch_inc_release
3202  * raw_atomic64_inc_return_relaxed() - atomic increment with relaxed ordering <<
3203  * @v: pointer to atomic64_t                  << 
3204  *                                            << 
3205  * Atomically updates @v to (@v + 1) with rel << 
3206  *                                            << 
3207  * Safe to use in noinstr code; prefer atomic << 
3208  *                                            << 
3209  * Return: The updated value of @v.           << 
3210  */                                           << 
3211 static __always_inline s64                       1657 static __always_inline s64
3212 raw_atomic64_inc_return_relaxed(atomic64_t *v !! 1658 arch_atomic64_fetch_inc_release(atomic64_t *v)
3213 {                                                1659 {
3214 #if defined(arch_atomic64_inc_return_relaxed) !! 1660         return arch_atomic64_fetch_add_release(1, v);
3215         return arch_atomic64_inc_return_relax << 
3216 #elif defined(arch_atomic64_inc_return)       << 
3217         return arch_atomic64_inc_return(v);   << 
3218 #else                                         << 
3219         return raw_atomic64_add_return_relaxe << 
3220 #endif                                        << 
3221 }                                                1661 }
                                                   >> 1662 #define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
                                                   >> 1663 #endif
3222                                                  1664 
3223 /**                                           !! 1665 #ifndef arch_atomic64_fetch_inc_relaxed
3224  * raw_atomic64_fetch_inc() - atomic increment with full ordering <<
3225  * @v: pointer to atomic64_t                  << 
3226  *                                            << 
3227  * Atomically updates @v to (@v + 1) with ful << 
3228  *                                            << 
3229  * Safe to use in noinstr code; prefer atomic << 
3230  *                                            << 
3231  * Return: The original value of @v.          << 
3232  */                                           << 
3233 static __always_inline s64                       1666 static __always_inline s64
3234 raw_atomic64_fetch_inc(atomic64_t *v)         !! 1667 arch_atomic64_fetch_inc_relaxed(atomic64_t *v)
3235 {                                                1668 {
3236 #if defined(arch_atomic64_fetch_inc)          !! 1669         return arch_atomic64_fetch_add_relaxed(1, v);
3237         return arch_atomic64_fetch_inc(v);    << 
3238 #elif defined(arch_atomic64_fetch_inc_relaxed << 
3239         s64 ret;                              << 
3240         __atomic_pre_full_fence();            << 
3241         ret = arch_atomic64_fetch_inc_relaxed << 
3242         __atomic_post_full_fence();           << 
3243         return ret;                           << 
3244 #else                                         << 
3245         return raw_atomic64_fetch_add(1, v);  << 
3246 #endif                                        << 
3247 }                                                1670 }
                                                   >> 1671 #define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed
                                                   >> 1672 #endif
3248                                                  1673 
3249 /**                                           !! 1674 #else /* arch_atomic64_fetch_inc_relaxed */
3250  * raw_atomic64_fetch_inc_acquire() - atomic increment with acquire ordering !! 1675
3251  * @v: pointer to atomic64_t                  !! 1676 #ifndef arch_atomic64_fetch_inc_acquire
3252  *                                            << 
3253  * Atomically updates @v to (@v + 1) with acq << 
3254  *                                            << 
3255  * Safe to use in noinstr code; prefer atomic << 
3256  *                                            << 
3257  * Return: The original value of @v.          << 
3258  */                                           << 
3259 static __always_inline s64                       1677 static __always_inline s64
3260 raw_atomic64_fetch_inc_acquire(atomic64_t *v) !! 1678 arch_atomic64_fetch_inc_acquire(atomic64_t *v)
3261 {                                                1679 {
3262 #if defined(arch_atomic64_fetch_inc_acquire)  << 
3263         return arch_atomic64_fetch_inc_acquir << 
3264 #elif defined(arch_atomic64_fetch_inc_relaxed << 
3265         s64 ret = arch_atomic64_fetch_inc_rel    1680         s64 ret = arch_atomic64_fetch_inc_relaxed(v);
3266         __atomic_acquire_fence();                1681         __atomic_acquire_fence();
3267         return ret;                              1682         return ret;
3268 #elif defined(arch_atomic64_fetch_inc)        << 
3269         return arch_atomic64_fetch_inc(v);    << 
3270 #else                                         << 
3271         return raw_atomic64_fetch_add_acquire << 
3272 #endif                                        << 
3273 }                                                1683 }
                                                   >> 1684 #define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
                                                   >> 1685 #endif
3274                                                  1686 
3275 /**                                           !! 1687 #ifndef arch_atomic64_fetch_inc_release
3276  * raw_atomic64_fetch_inc_release() - atomic increment with release ordering <<
3277  * @v: pointer to atomic64_t                  << 
3278  *                                            << 
3279  * Atomically updates @v to (@v + 1) with rel << 
3280  *                                            << 
3281  * Safe to use in noinstr code; prefer atomic << 
3282  *                                            << 
3283  * Return: The original value of @v.          << 
3284  */                                           << 
3285 static __always_inline s64                       1688 static __always_inline s64
3286 raw_atomic64_fetch_inc_release(atomic64_t *v) !! 1689 arch_atomic64_fetch_inc_release(atomic64_t *v)
3287 {                                                1690 {
3288 #if defined(arch_atomic64_fetch_inc_release)  << 
3289         return arch_atomic64_fetch_inc_releas << 
3290 #elif defined(arch_atomic64_fetch_inc_relaxed << 
3291         __atomic_release_fence();                1691         __atomic_release_fence();
3292         return arch_atomic64_fetch_inc_relaxe    1692         return arch_atomic64_fetch_inc_relaxed(v);
3293 #elif defined(arch_atomic64_fetch_inc)        << 
3294         return arch_atomic64_fetch_inc(v);    << 
3295 #else                                         << 
3296         return raw_atomic64_fetch_add_release << 
3297 #endif                                        << 
3298 }                                                1693 }
                                                   >> 1694 #define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
                                                   >> 1695 #endif
3299                                                  1696 
3300 /**                                           !! 1697 #ifndef arch_atomic64_fetch_inc
3301  * raw_atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering <<
3302  * @v: pointer to atomic64_t                  << 
3303  *                                            << 
3304  * Atomically updates @v to (@v + 1) with rel << 
3305  *                                            << 
3306  * Safe to use in noinstr code; prefer atomic << 
3307  *                                            << 
3308  * Return: The original value of @v.          << 
3309  */                                           << 
3310 static __always_inline s64                       1698 static __always_inline s64
3311 raw_atomic64_fetch_inc_relaxed(atomic64_t *v) !! 1699 arch_atomic64_fetch_inc(atomic64_t *v)
3312 {                                                1700 {
3313 #if defined(arch_atomic64_fetch_inc_relaxed)  !! 1701         s64 ret;
3314         return arch_atomic64_fetch_inc_relaxe !! 1702         __atomic_pre_full_fence();
3315 #elif defined(arch_atomic64_fetch_inc)        !! 1703         ret = arch_atomic64_fetch_inc_relaxed(v);
3316         return arch_atomic64_fetch_inc(v);    !! 1704         __atomic_post_full_fence();
3317 #else                                         !! 1705         return ret;
3318         return raw_atomic64_fetch_add_relaxed << 
3319 #endif                                        << 
3320 }                                                1706 }
                                                   >> 1707 #define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
                                                   >> 1708 #endif
3321                                                  1709 
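fetch_inc differs from inc_return only in returning the pre-increment value, e.g. for handing out zero-based slot indices. Sketch only; the names are invented:

static atomic64_t demo_next_slot = ATOMIC64_INIT(0);

/* Returns 0, 1, 2, ... - the value observed before the increment. */
static inline s64 demo_alloc_slot(void)
{
	return atomic64_fetch_inc(&demo_next_slot);
}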
3322 /**                                           !! 1710 #endif /* arch_atomic64_fetch_inc_relaxed */
3323  * raw_atomic64_dec() - atomic decrement with relaxed ordering !! 1711
3324  * @v: pointer to atomic64_t                  !! 1712 #ifndef arch_atomic64_dec
3325  *                                            << 
3326  * Atomically updates @v to (@v - 1) with rel << 
3327  *                                            << 
3328  * Safe to use in noinstr code; prefer atomic << 
3329  *                                            << 
3330  * Return: Nothing.                           << 
3331  */                                           << 
3332 static __always_inline void                      1713 static __always_inline void
3333 raw_atomic64_dec(atomic64_t *v)               !! 1714 arch_atomic64_dec(atomic64_t *v)
3334 {                                                1715 {
3335 #if defined(arch_atomic64_dec)                !! 1716         arch_atomic64_sub(1, v);
3336         arch_atomic64_dec(v);                 !! 1717 }
3337 #else                                         !! 1718 #define arch_atomic64_dec arch_atomic64_dec
3338         raw_atomic64_sub(1, v);               << 
3339 #endif                                           1719 #endif
                                                   >> 1720 
                                                   >> 1721 #ifndef arch_atomic64_dec_return_relaxed
                                                   >> 1722 #ifdef arch_atomic64_dec_return
                                                   >> 1723 #define arch_atomic64_dec_return_acquire arch_atomic64_dec_return
                                                   >> 1724 #define arch_atomic64_dec_return_release arch_atomic64_dec_return
                                                   >> 1725 #define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return
                                                   >> 1726 #endif /* arch_atomic64_dec_return */
                                                   >> 1727 
                                                   >> 1728 #ifndef arch_atomic64_dec_return
                                                   >> 1729 static __always_inline s64
                                                   >> 1730 arch_atomic64_dec_return(atomic64_t *v)
                                                   >> 1731 {
                                                   >> 1732         return arch_atomic64_sub_return(1, v);
3340 }                                                1733 }
                                                   >> 1734 #define arch_atomic64_dec_return arch_atomic64_dec_return
                                                   >> 1735 #endif
3341                                                  1736 
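A sketch of the dec_return contract with an invented counter; it only illustrates the post-decrement return value - real reference counts should use refcount_t or the explicitly ordered helpers:

static atomic64_t demo_users = ATOMIC64_INIT(1);

/* Returns true when the caller dropped the last user. */
static inline bool demo_put_user(void)
{
	return atomic64_dec_return(&demo_users) == 0;
}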
3342 /**                                           !! 1737 #ifndef arch_atomic64_dec_return_acquire
3343  * raw_atomic64_dec_return() - atomic decrement with full ordering <<
3344  * @v: pointer to atomic64_t                  << 
3345  *                                            << 
3346  * Atomically updates @v to (@v - 1) with ful << 
3347  *                                            << 
3348  * Safe to use in noinstr code; prefer atomic << 
3349  *                                            << 
3350  * Return: The updated value of @v.           << 
3351  */                                           << 
3352 static __always_inline s64                       1738 static __always_inline s64
3353 raw_atomic64_dec_return(atomic64_t *v)        !! 1739 arch_atomic64_dec_return_acquire(atomic64_t *v)
3354 {                                                1740 {
3355 #if defined(arch_atomic64_dec_return)         !! 1741         return arch_atomic64_sub_return_acquire(1, v);
3356         return arch_atomic64_dec_return(v);   !! 1742 }
3357 #elif defined(arch_atomic64_dec_return_relaxe !! 1743 #define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
3358         s64 ret;                              << 
3359         __atomic_pre_full_fence();            << 
3360         ret = arch_atomic64_dec_return_relaxe << 
3361         __atomic_post_full_fence();           << 
3362         return ret;                           << 
3363 #else                                         << 
3364         return raw_atomic64_sub_return(1, v); << 
3365 #endif                                           1744 #endif
                                                   >> 1745 
                                                   >> 1746 #ifndef arch_atomic64_dec_return_release
                                                   >> 1747 static __always_inline s64
                                                   >> 1748 arch_atomic64_dec_return_release(atomic64_t *v)
                                                   >> 1749 {
                                                   >> 1750         return arch_atomic64_sub_return_release(1, v);
3366 }                                                1751 }
                                                   >> 1752 #define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
                                                   >> 1753 #endif
3367                                                  1754 
3368 /**                                           !! 1755 #ifndef arch_atomic64_dec_return_relaxed
3369  * raw_atomic64_dec_return_acquire() - atomic decrement with acquire ordering !! 1756 static __always_inline s64
3370  * @v: pointer to atomic64_t                  !! 1757 arch_atomic64_dec_return_relaxed(atomic64_t *v)
3371  *                                            !! 1758 {
3372  * Atomically updates @v to (@v - 1) with acq !! 1759         return arch_atomic64_sub_return_relaxed(1, v);
3373  *                                            !! 1760 }
3374  * Safe to use in noinstr code; prefer atomic !! 1761 #define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
3375  *                                            !! 1762 #endif
3376  * Return: The updated value of @v.           !! 1763 
3377  */                                           !! 1764 #else /* arch_atomic64_dec_return_relaxed */
                                                   >> 1765 
                                                   >> 1766 #ifndef arch_atomic64_dec_return_acquire
3378 static __always_inline s64                       1767 static __always_inline s64
3379 raw_atomic64_dec_return_acquire(atomic64_t *v !! 1768 arch_atomic64_dec_return_acquire(atomic64_t *v)
3380 {                                                1769 {
3381 #if defined(arch_atomic64_dec_return_acquire) << 
3382         return arch_atomic64_dec_return_acqui << 
3383 #elif defined(arch_atomic64_dec_return_relaxe << 
3384         s64 ret = arch_atomic64_dec_return_re    1770         s64 ret = arch_atomic64_dec_return_relaxed(v);
3385         __atomic_acquire_fence();                1771         __atomic_acquire_fence();
3386         return ret;                              1772         return ret;
3387 #elif defined(arch_atomic64_dec_return)       << 
3388         return arch_atomic64_dec_return(v);   << 
3389 #else                                         << 
3390         return raw_atomic64_sub_return_acquir << 
3391 #endif                                        << 
3392 }                                                1773 }
                                                   >> 1774 #define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
                                                   >> 1775 #endif
3393                                                  1776 
3394 /**                                           !! 1777 #ifndef arch_atomic64_dec_return_release
3395  * raw_atomic64_dec_return_release() - atomic decrement with release ordering <<
3396  * @v: pointer to atomic64_t                  << 
3397  *                                            << 
3398  * Atomically updates @v to (@v - 1) with rel << 
3399  *                                            << 
3400  * Safe to use in noinstr code; prefer atomic << 
3401  *                                            << 
3402  * Return: The updated value of @v.           << 
3403  */                                           << 
3404 static __always_inline s64                       1778 static __always_inline s64
3405 raw_atomic64_dec_return_release(atomic64_t *v !! 1779 arch_atomic64_dec_return_release(atomic64_t *v)
3406 {                                                1780 {
3407 #if defined(arch_atomic64_dec_return_release) << 
3408         return arch_atomic64_dec_return_relea << 
3409 #elif defined(arch_atomic64_dec_return_relaxe << 
3410         __atomic_release_fence();                1781         __atomic_release_fence();
3411         return arch_atomic64_dec_return_relax    1782         return arch_atomic64_dec_return_relaxed(v);
3412 #elif defined(arch_atomic64_dec_return)       << 
3413         return arch_atomic64_dec_return(v);   << 
3414 #else                                         << 
3415         return raw_atomic64_sub_return_releas << 
3416 #endif                                        << 
3417 }                                                1783 }
3418                                               !! 1784 #define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
3419 /**                                           << 
3420  * raw_atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering <<
3421  * @v: pointer to atomic64_t                  << 
3422  *                                            << 
3423  * Atomically updates @v to (@v - 1) with rel << 
3424  *                                            << 
3425  * Safe to use in noinstr code; prefer atomic << 
3426  *                                            << 
3427  * Return: The updated value of @v.           << 
3428  */                                           << 
3429 static __always_inline s64                    << 
3430 raw_atomic64_dec_return_relaxed(atomic64_t *v << 
3431 {                                             << 
3432 #if defined(arch_atomic64_dec_return_relaxed) << 
3433         return arch_atomic64_dec_return_relax << 
3434 #elif defined(arch_atomic64_dec_return)       << 
3435         return arch_atomic64_dec_return(v);   << 
3436 #else                                         << 
3437         return raw_atomic64_sub_return_relaxe << 
3438 #endif                                           1785 #endif
3439 }                                             << 
3440                                                  1786 
3441 /**                                           !! 1787 #ifndef arch_atomic64_dec_return
3442  * raw_atomic64_fetch_dec() - atomic decrement with full ordering <<
3443  * @v: pointer to atomic64_t                  << 
3444  *                                            << 
3445  * Atomically updates @v to (@v - 1) with ful << 
3446  *                                            << 
3447  * Safe to use in noinstr code; prefer atomic << 
3448  *                                            << 
3449  * Return: The original value of @v.          << 
3450  */                                           << 
3451 static __always_inline s64                       1788 static __always_inline s64
3452 raw_atomic64_fetch_dec(atomic64_t *v)         !! 1789 arch_atomic64_dec_return(atomic64_t *v)
3453 {                                                1790 {
3454 #if defined(arch_atomic64_fetch_dec)          << 
3455         return arch_atomic64_fetch_dec(v);    << 
3456 #elif defined(arch_atomic64_fetch_dec_relaxed << 
3457         s64 ret;                                 1791         s64 ret;
3458         __atomic_pre_full_fence();               1792         __atomic_pre_full_fence();
3459         ret = arch_atomic64_fetch_dec_relaxed !! 1793         ret = arch_atomic64_dec_return_relaxed(v);
3460         __atomic_post_full_fence();              1794         __atomic_post_full_fence();
3461         return ret;                              1795         return ret;
3462 #else                                         << 
3463         return raw_atomic64_fetch_sub(1, v);  << 
3464 #endif                                        << 
3465 }                                                1796 }
                                                   >> 1797 #define arch_atomic64_dec_return arch_atomic64_dec_return
                                                   >> 1798 #endif
3466                                                  1799 
3467 /**                                           !! 1800 #endif /* arch_atomic64_dec_return_relaxed */
3468  * raw_atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering !! 1801
3469  * @v: pointer to atomic64_t                  !! 1802 #ifndef arch_atomic64_fetch_dec_relaxed
3470  *                                            !! 1803 #ifdef arch_atomic64_fetch_dec
3471  * Atomically updates @v to (@v - 1) with acq !! 1804 #define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec
3472  *                                            !! 1805 #define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec
3473  * Safe to use in noinstr code; prefer atomic !! 1806 #define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec
3474  *                                            !! 1807 #endif /* arch_atomic64_fetch_dec */
3475  * Return: The original value of @v.          !! 1808 
3476  */                                           !! 1809 #ifndef arch_atomic64_fetch_dec
3477 static __always_inline s64                       1810 static __always_inline s64
3478 raw_atomic64_fetch_dec_acquire(atomic64_t *v) !! 1811 arch_atomic64_fetch_dec(atomic64_t *v)
3479 {                                                1812 {
3480 #if defined(arch_atomic64_fetch_dec_acquire)  !! 1813         return arch_atomic64_fetch_sub(1, v);
3481         return arch_atomic64_fetch_dec_acquir << 
3482 #elif defined(arch_atomic64_fetch_dec_relaxed << 
3483         s64 ret = arch_atomic64_fetch_dec_rel << 
3484         __atomic_acquire_fence();             << 
3485         return ret;                           << 
3486 #elif defined(arch_atomic64_fetch_dec)        << 
3487         return arch_atomic64_fetch_dec(v);    << 
3488 #else                                         << 
3489         return raw_atomic64_fetch_sub_acquire << 
3490 #endif                                        << 
3491 }                                                1814 }
                                                   >> 1815 #define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
                                                   >> 1816 #endif
3492                                                  1817 
3493 /**                                           !! 1818 #ifndef arch_atomic64_fetch_dec_acquire
3494  * raw_atomic64_fetch_dec_release() - atomic decrement with release ordering <<
3495  * @v: pointer to atomic64_t                  << 
3496  *                                            << 
3497  * Atomically updates @v to (@v - 1) with rel << 
3498  *                                            << 
3499  * Safe to use in noinstr code; prefer atomic << 
3500  *                                            << 
3501  * Return: The original value of @v.          << 
3502  */                                           << 
3503 static __always_inline s64                       1819 static __always_inline s64
3504 raw_atomic64_fetch_dec_release(atomic64_t *v) !! 1820 arch_atomic64_fetch_dec_acquire(atomic64_t *v)
3505 {                                                1821 {
3506 #if defined(arch_atomic64_fetch_dec_release)  !! 1822         return arch_atomic64_fetch_sub_acquire(1, v);
3507         return arch_atomic64_fetch_dec_releas !! 1823 }
3508 #elif defined(arch_atomic64_fetch_dec_relaxed !! 1824 #define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
3509         __atomic_release_fence();             << 
3510         return arch_atomic64_fetch_dec_relaxe << 
3511 #elif defined(arch_atomic64_fetch_dec)        << 
3512         return arch_atomic64_fetch_dec(v);    << 
3513 #else                                         << 
3514         return raw_atomic64_fetch_sub_release << 
3515 #endif                                           1825 #endif
                                                   >> 1826 
                                                   >> 1827 #ifndef arch_atomic64_fetch_dec_release
                                                   >> 1828 static __always_inline s64
                                                   >> 1829 arch_atomic64_fetch_dec_release(atomic64_t *v)
                                                   >> 1830 {
                                                   >> 1831         return arch_atomic64_fetch_sub_release(1, v);
3516 }                                                1832 }
                                                   >> 1833 #define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
                                                   >> 1834 #endif
3517                                                  1835 
3518 /**                                           !! 1836 #ifndef arch_atomic64_fetch_dec_relaxed
3519  * raw_atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering <<
3520  * @v: pointer to atomic64_t                  << 
3521  *                                            << 
3522  * Atomically updates @v to (@v - 1) with rel << 
3523  *                                            << 
3524  * Safe to use in noinstr code; prefer atomic << 
3525  *                                            << 
3526  * Return: The original value of @v.          << 
3527  */                                           << 
3528 static __always_inline s64                       1837 static __always_inline s64
3529 raw_atomic64_fetch_dec_relaxed(atomic64_t *v) !! 1838 arch_atomic64_fetch_dec_relaxed(atomic64_t *v)
3530 {                                                1839 {
3531 #if defined(arch_atomic64_fetch_dec_relaxed)  !! 1840         return arch_atomic64_fetch_sub_relaxed(1, v);
3532         return arch_atomic64_fetch_dec_relaxe !! 1841 }
3533 #elif defined(arch_atomic64_fetch_dec)        !! 1842 #define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed
3534         return arch_atomic64_fetch_dec(v);    << 
3535 #else                                         << 
3536         return raw_atomic64_fetch_sub_relaxed << 
3537 #endif                                           1843 #endif
                                                   >> 1844 
                                                   >> 1845 #else /* arch_atomic64_fetch_dec_relaxed */
                                                   >> 1846 
                                                   >> 1847 #ifndef arch_atomic64_fetch_dec_acquire
                                                   >> 1848 static __always_inline s64
                                                   >> 1849 arch_atomic64_fetch_dec_acquire(atomic64_t *v)
                                                   >> 1850 {
                                                   >> 1851         s64 ret = arch_atomic64_fetch_dec_relaxed(v);
                                                   >> 1852         __atomic_acquire_fence();
                                                   >> 1853         return ret;
3538 }                                                1854 }
                                                   >> 1855 #define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
                                                   >> 1856 #endif
3539                                                  1857 
3540 /**                                           !! 1858 #ifndef arch_atomic64_fetch_dec_release
3541  * raw_atomic64_and() - atomic bitwise AND with relaxed ordering !! 1859 static __always_inline s64
3542  * @i: s64 value                              !! 1860 arch_atomic64_fetch_dec_release(atomic64_t *v)
3543  * @v: pointer to atomic64_t                  << 
3544  *                                            << 
3545  * Atomically updates @v to (@v & @i) with re << 
3546  *                                            << 
3547  * Safe to use in noinstr code; prefer atomic << 
3548  *                                            << 
3549  * Return: Nothing.                           << 
3550  */                                           << 
3551 static __always_inline void                   << 
3552 raw_atomic64_and(s64 i, atomic64_t *v)        << 
3553 {                                                1861 {
3554         arch_atomic64_and(i, v);              !! 1862         __atomic_release_fence();
                                                   >> 1863         return arch_atomic64_fetch_dec_relaxed(v);
3555 }                                                1864 }
                                                   >> 1865 #define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
                                                   >> 1866 #endif
3556                                                  1867 
3557 /**                                           !! 1868 #ifndef arch_atomic64_fetch_dec
3558  * raw_atomic64_fetch_and() - atomic bitwise AND with full ordering <<
3559  * @i: s64 value                              << 
3560  * @v: pointer to atomic64_t                  << 
3561  *                                            << 
3562  * Atomically updates @v to (@v & @i) with fu << 
3563  *                                            << 
3564  * Safe to use in noinstr code; prefer atomic << 
3565  *                                            << 
3566  * Return: The original value of @v.          << 
3567  */                                           << 
3568 static __always_inline s64                       1869 static __always_inline s64
3569 raw_atomic64_fetch_and(s64 i, atomic64_t *v)  !! 1870 arch_atomic64_fetch_dec(atomic64_t *v)
3570 {                                                1871 {
3571 #if defined(arch_atomic64_fetch_and)          << 
3572         return arch_atomic64_fetch_and(i, v); << 
3573 #elif defined(arch_atomic64_fetch_and_relaxed << 
3574         s64 ret;                                 1872         s64 ret;
3575         __atomic_pre_full_fence();               1873         __atomic_pre_full_fence();
3576         ret = arch_atomic64_fetch_and_relaxed !! 1874         ret = arch_atomic64_fetch_dec_relaxed(v);
3577         __atomic_post_full_fence();              1875         __atomic_post_full_fence();
3578         return ret;                              1876         return ret;
3579 #else                                         << 
3580 #error "Unable to define raw_atomic64_fetch_a << 
3581 #endif                                        << 
3582 }                                                1877 }
                                                   >> 1878 #define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
                                                   >> 1879 #endif
3583                                                  1880 
3584 /**                                           !! 1881 #endif /* arch_atomic64_fetch_dec_relaxed */
3585  * raw_atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering !! 1882
3586  * @i: s64 value                              !! 1883 #ifndef arch_atomic64_fetch_and_relaxed
3587  * @v: pointer to atomic64_t                  !! 1884 #define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
3588  *                                            !! 1885 #define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
3589  * Atomically updates @v to (@v & @i) with ac !! 1886 #define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and
3590  *                                            !! 1887 #else /* arch_atomic64_fetch_and_relaxed */
3591  * Safe to use in noinstr code; prefer atomic !! 1888 
3592  *                                            !! 1889 #ifndef arch_atomic64_fetch_and_acquire
3593  * Return: The original value of @v.          << 
3594  */                                           << 
3595 static __always_inline s64                       1890 static __always_inline s64
3596 raw_atomic64_fetch_and_acquire(s64 i, atomic6 !! 1891 arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
3597 {                                                1892 {
3598 #if defined(arch_atomic64_fetch_and_acquire)  << 
3599         return arch_atomic64_fetch_and_acquir << 
3600 #elif defined(arch_atomic64_fetch_and_relaxed << 
3601         s64 ret = arch_atomic64_fetch_and_rel    1893         s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
3602         __atomic_acquire_fence();                1894         __atomic_acquire_fence();
3603         return ret;                              1895         return ret;
3604 #elif defined(arch_atomic64_fetch_and)        << 
3605         return arch_atomic64_fetch_and(i, v); << 
3606 #else                                         << 
3607 #error "Unable to define raw_atomic64_fetch_a << 
3608 #endif                                        << 
3609 }                                                1896 }
                                                   >> 1897 #define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
                                                   >> 1898 #endif
3610                                                  1899 
3611 /**                                           !! 1900 #ifndef arch_atomic64_fetch_and_release
3612  * raw_atomic64_fetch_and_release() - atomic bitwise AND with release ordering <<
3613  * @i: s64 value                              << 
3614  * @v: pointer to atomic64_t                  << 
3615  *                                            << 
3616  * Atomically updates @v to (@v & @i) with re << 
3617  *                                            << 
3618  * Safe to use in noinstr code; prefer atomic << 
3619  *                                            << 
3620  * Return: The original value of @v.          << 
3621  */                                           << 
3622 static __always_inline s64                       1901 static __always_inline s64
3623 raw_atomic64_fetch_and_release(s64 i, atomic6 !! 1902 arch_atomic64_fetch_and_release(s64 i, atomic64_t *v)
3624 {                                                1903 {
3625 #if defined(arch_atomic64_fetch_and_release)  << 
3626         return arch_atomic64_fetch_and_releas << 
3627 #elif defined(arch_atomic64_fetch_and_relaxed << 
3628         __atomic_release_fence();                1904         __atomic_release_fence();
3629         return arch_atomic64_fetch_and_relaxe    1905         return arch_atomic64_fetch_and_relaxed(i, v);
3630 #elif defined(arch_atomic64_fetch_and)        << 
3631         return arch_atomic64_fetch_and(i, v); << 
3632 #else                                         << 
3633 #error "Unable to define raw_atomic64_fetch_a << 
3634 #endif                                        << 
3635 }                                                1906 }
                                                   >> 1907 #define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
                                                   >> 1908 #endif
3636                                                  1909 
3637 /**                                           !! 1910 #ifndef arch_atomic64_fetch_and
3638  * raw_atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering <<
3639  * @i: s64 value                              << 
3640  * @v: pointer to atomic64_t                  << 
3641  *                                            << 
3642  * Atomically updates @v to (@v & @i) with re << 
3643  *                                            << 
3644  * Safe to use in noinstr code; prefer atomic << 
3645  *                                            << 
3646  * Return: The original value of @v.          << 
3647  */                                           << 
3648 static __always_inline s64                       1911 static __always_inline s64
3649 raw_atomic64_fetch_and_relaxed(s64 i, atomic6 !! 1912 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
3650 {                                                1913 {
3651 #if defined(arch_atomic64_fetch_and_relaxed)  !! 1914         s64 ret;
3652         return arch_atomic64_fetch_and_relaxe !! 1915         __atomic_pre_full_fence();
3653 #elif defined(arch_atomic64_fetch_and)        !! 1916         ret = arch_atomic64_fetch_and_relaxed(i, v);
3654         return arch_atomic64_fetch_and(i, v); !! 1917         __atomic_post_full_fence();
3655 #else                                         !! 1918         return ret;
3656 #error "Unable to define raw_atomic64_fetch_a << 
3657 #endif                                        << 
3658 }                                                1919 }
                                                   >> 1920 #define arch_atomic64_fetch_and arch_atomic64_fetch_and
                                                   >> 1921 #endif
3659                                                  1922 
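Usage sketch (editorial illustration, not part of the generated header; the device flags and pending_flags variable are hypothetical): outside noinstr code the instrumented atomic64_*() wrappers are preferred, and atomic64_fetch_and() returns the value the counter held before the AND, so a caller can see which bits it actually cleared.

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/printk.h>

#define MYDEV_FLAG_RX	BIT_ULL(0)	/* hypothetical flag bits */
#define MYDEV_FLAG_TX	BIT_ULL(1)

static atomic64_t pending_flags = ATOMIC64_INIT(0);

static void mydev_clear_rx_tx(void)
{
	/* fetch_and returns the value *before* the AND, with full ordering */
	s64 old = atomic64_fetch_and(~(MYDEV_FLAG_RX | MYDEV_FLAG_TX),
				     &pending_flags);

	if (old & MYDEV_FLAG_RX)
		pr_debug("RX was pending\n");
}
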
3660 /**                                           !! 1923 #endif /* arch_atomic64_fetch_and_relaxed */
3661  * raw_atomic64_andnot() - atomic bitwise AND !! 1924 
3662  * @i: s64 value                              !! 1925 #ifndef arch_atomic64_andnot
3663  * @v: pointer to atomic64_t                  << 
3664  *                                            << 
3665  * Atomically updates @v to (@v & ~@i) with r << 
3666  *                                            << 
3667  * Safe to use in noinstr code; prefer atomic << 
3668  *                                            << 
3669  * Return: Nothing.                           << 
3670  */                                           << 
3671 static __always_inline void                      1926 static __always_inline void
3672 raw_atomic64_andnot(s64 i, atomic64_t *v)     !! 1927 arch_atomic64_andnot(s64 i, atomic64_t *v)
3673 {                                                1928 {
3674 #if defined(arch_atomic64_andnot)             !! 1929         arch_atomic64_and(~i, v);
3675         arch_atomic64_andnot(i, v);           << 
3676 #else                                         << 
3677         raw_atomic64_and(~i, v);              << 
3678 #endif                                        << 
3679 }                                                1930 }
                                                   >> 1931 #define arch_atomic64_andnot arch_atomic64_andnot
                                                   >> 1932 #endif
3680                                                  1933 
3681 /**                                           !! 1934 #ifndef arch_atomic64_fetch_andnot_relaxed
3682  * raw_atomic64_fetch_andnot() - atomic bitwi !! 1935 #ifdef arch_atomic64_fetch_andnot
3683  * @i: s64 value                              !! 1936 #define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot
3684  * @v: pointer to atomic64_t                  !! 1937 #define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot
3685  *                                            !! 1938 #define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot
3686  * Atomically updates @v to (@v & ~@i) with f !! 1939 #endif /* arch_atomic64_fetch_andnot */
3687  *                                            !! 1940 
3688  * Safe to use in noinstr code; prefer atomic !! 1941 #ifndef arch_atomic64_fetch_andnot
3689  *                                            << 
3690  * Return: The original value of @v.          << 
3691  */                                           << 
3692 static __always_inline s64                       1942 static __always_inline s64
3693 raw_atomic64_fetch_andnot(s64 i, atomic64_t * !! 1943 arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
3694 {                                                1944 {
3695 #if defined(arch_atomic64_fetch_andnot)       !! 1945         return arch_atomic64_fetch_and(~i, v);
3696         return arch_atomic64_fetch_andnot(i,  << 
3697 #elif defined(arch_atomic64_fetch_andnot_rela << 
3698         s64 ret;                              << 
3699         __atomic_pre_full_fence();            << 
3700         ret = arch_atomic64_fetch_andnot_rela << 
3701         __atomic_post_full_fence();           << 
3702         return ret;                           << 
3703 #else                                         << 
3704         return raw_atomic64_fetch_and(~i, v); << 
3705 #endif                                        << 
3706 }                                                1946 }
                                                   >> 1947 #define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
                                                   >> 1948 #endif
3707                                                  1949 
3708 /**                                           !! 1950 #ifndef arch_atomic64_fetch_andnot_acquire
3709  * raw_atomic64_fetch_andnot_acquire() - atom << 
3710  * @i: s64 value                              << 
3711  * @v: pointer to atomic64_t                  << 
3712  *                                            << 
3713  * Atomically updates @v to (@v & ~@i) with a << 
3714  *                                            << 
3715  * Safe to use in noinstr code; prefer atomic << 
3716  *                                            << 
3717  * Return: The original value of @v.          << 
3718  */                                           << 
3719 static __always_inline s64                       1951 static __always_inline s64
3720 raw_atomic64_fetch_andnot_acquire(s64 i, atom !! 1952 arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
3721 {                                                1953 {
3722 #if defined(arch_atomic64_fetch_andnot_acquir !! 1954         return arch_atomic64_fetch_and_acquire(~i, v);
3723         return arch_atomic64_fetch_andnot_acq << 
3724 #elif defined(arch_atomic64_fetch_andnot_rela << 
3725         s64 ret = arch_atomic64_fetch_andnot_ << 
3726         __atomic_acquire_fence();             << 
3727         return ret;                           << 
3728 #elif defined(arch_atomic64_fetch_andnot)     << 
3729         return arch_atomic64_fetch_andnot(i,  << 
3730 #else                                         << 
3731         return raw_atomic64_fetch_and_acquire << 
3732 #endif                                        << 
3733 }                                                1955 }
                                                   >> 1956 #define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
                                                   >> 1957 #endif
3734                                                  1958 
3735 /**                                           !! 1959 #ifndef arch_atomic64_fetch_andnot_release
3736  * raw_atomic64_fetch_andnot_release() - atom << 
3737  * @i: s64 value                              << 
3738  * @v: pointer to atomic64_t                  << 
3739  *                                            << 
3740  * Atomically updates @v to (@v & ~@i) with r << 
3741  *                                            << 
3742  * Safe to use in noinstr code; prefer atomic << 
3743  *                                            << 
3744  * Return: The original value of @v.          << 
3745  */                                           << 
3746 static __always_inline s64                       1960 static __always_inline s64
3747 raw_atomic64_fetch_andnot_release(s64 i, atom !! 1961 arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
3748 {                                                1962 {
3749 #if defined(arch_atomic64_fetch_andnot_releas !! 1963         return arch_atomic64_fetch_and_release(~i, v);
3750         return arch_atomic64_fetch_andnot_rel << 
3751 #elif defined(arch_atomic64_fetch_andnot_rela << 
3752         __atomic_release_fence();             << 
3753         return arch_atomic64_fetch_andnot_rel << 
3754 #elif defined(arch_atomic64_fetch_andnot)     << 
3755         return arch_atomic64_fetch_andnot(i,  << 
3756 #else                                         << 
3757         return raw_atomic64_fetch_and_release << 
3758 #endif                                        << 
3759 }                                                1964 }
                                                   >> 1965 #define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
                                                   >> 1966 #endif
3760                                                  1967 
3761 /**                                           !! 1968 #ifndef arch_atomic64_fetch_andnot_relaxed
3762  * raw_atomic64_fetch_andnot_relaxed() - atom << 
3763  * @i: s64 value                              << 
3764  * @v: pointer to atomic64_t                  << 
3765  *                                            << 
3766  * Atomically updates @v to (@v & ~@i) with r << 
3767  *                                            << 
3768  * Safe to use in noinstr code; prefer atomic << 
3769  *                                            << 
3770  * Return: The original value of @v.          << 
3771  */                                           << 
3772 static __always_inline s64                       1969 static __always_inline s64
3773 raw_atomic64_fetch_andnot_relaxed(s64 i, atom !! 1970 arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
3774 {                                                1971 {
3775 #if defined(arch_atomic64_fetch_andnot_relaxe !! 1972         return arch_atomic64_fetch_and_relaxed(~i, v);
3776         return arch_atomic64_fetch_andnot_rel !! 1973 }
3777 #elif defined(arch_atomic64_fetch_andnot)     !! 1974 #define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
3778         return arch_atomic64_fetch_andnot(i,  << 
3779 #else                                         << 
3780         return raw_atomic64_fetch_and_relaxed << 
3781 #endif                                           1975 #endif
                                                   >> 1976 
                                                   >> 1977 #else /* arch_atomic64_fetch_andnot_relaxed */
                                                   >> 1978 
                                                   >> 1979 #ifndef arch_atomic64_fetch_andnot_acquire
                                                   >> 1980 static __always_inline s64
                                                   >> 1981 arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
                                                   >> 1982 {
                                                   >> 1983         s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
                                                   >> 1984         __atomic_acquire_fence();
                                                   >> 1985         return ret;
3782 }                                                1986 }
                                                   >> 1987 #define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
                                                   >> 1988 #endif
3783                                                  1989 
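Usage sketch (hypothetical names, not from this file): because fetch_andnot() clears the bits set in @i and hands back the prior value, it can act as a 64-bit "test and clear"; the acquire variant additionally orders the claim against the reads that follow it.

#include <linux/atomic.h>
#include <linux/bits.h>

#define WORKER_SCHEDULED	BIT_ULL(3)	/* hypothetical state bit */

/* Returns true only for the caller that actually cleared the bit. */
static bool worker_claim(atomic64_t *state)
{
	s64 old = atomic64_fetch_andnot_acquire(WORKER_SCHEDULED, state);

	return old & WORKER_SCHEDULED;
}
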
3784 /**                                           !! 1990 #ifndef arch_atomic64_fetch_andnot_release
3785  * raw_atomic64_or() - atomic bitwise OR with !! 1991 static __always_inline s64
3786  * @i: s64 value                              !! 1992 arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
3787  * @v: pointer to atomic64_t                  << 
3788  *                                            << 
3789  * Atomically updates @v to (@v | @i) with re << 
3790  *                                            << 
3791  * Safe to use in noinstr code; prefer atomic << 
3792  *                                            << 
3793  * Return: Nothing.                           << 
3794  */                                           << 
3795 static __always_inline void                   << 
3796 raw_atomic64_or(s64 i, atomic64_t *v)         << 
3797 {                                                1993 {
3798         arch_atomic64_or(i, v);               !! 1994         __atomic_release_fence();
                                                   >> 1995         return arch_atomic64_fetch_andnot_relaxed(i, v);
3799 }                                                1996 }
                                                   >> 1997 #define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
                                                   >> 1998 #endif
3800                                                  1999 
3801 /**                                           !! 2000 #ifndef arch_atomic64_fetch_andnot
3802  * raw_atomic64_fetch_or() - atomic bitwise O << 
3803  * @i: s64 value                              << 
3804  * @v: pointer to atomic64_t                  << 
3805  *                                            << 
3806  * Atomically updates @v to (@v | @i) with fu << 
3807  *                                            << 
3808  * Safe to use in noinstr code; prefer atomic << 
3809  *                                            << 
3810  * Return: The original value of @v.          << 
3811  */                                           << 
3812 static __always_inline s64                       2001 static __always_inline s64
3813 raw_atomic64_fetch_or(s64 i, atomic64_t *v)   !! 2002 arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
3814 {                                                2003 {
3815 #if defined(arch_atomic64_fetch_or)           << 
3816         return arch_atomic64_fetch_or(i, v);  << 
3817 #elif defined(arch_atomic64_fetch_or_relaxed) << 
3818         s64 ret;                                 2004         s64 ret;
3819         __atomic_pre_full_fence();               2005         __atomic_pre_full_fence();
3820         ret = arch_atomic64_fetch_or_relaxed( !! 2006         ret = arch_atomic64_fetch_andnot_relaxed(i, v);
3821         __atomic_post_full_fence();              2007         __atomic_post_full_fence();
3822         return ret;                              2008         return ret;
3823 #else                                         << 
3824 #error "Unable to define raw_atomic64_fetch_o << 
3825 #endif                                        << 
3826 }                                                2009 }
                                                   >> 2010 #define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
                                                   >> 2011 #endif
3827                                                  2012 
3828 /**                                           !! 2013 #endif /* arch_atomic64_fetch_andnot_relaxed */
3829  * raw_atomic64_fetch_or_acquire() - atomic b !! 2014 
3830  * @i: s64 value                              !! 2015 #ifndef arch_atomic64_fetch_or_relaxed
3831  * @v: pointer to atomic64_t                  !! 2016 #define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
3832  *                                            !! 2017 #define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
3833  * Atomically updates @v to (@v | @i) with ac !! 2018 #define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or
3834  *                                            !! 2019 #else /* arch_atomic64_fetch_or_relaxed */
3835  * Safe to use in noinstr code; prefer atomic !! 2020 
3836  *                                            !! 2021 #ifndef arch_atomic64_fetch_or_acquire
3837  * Return: The original value of @v.          << 
3838  */                                           << 
3839 static __always_inline s64                       2022 static __always_inline s64
3840 raw_atomic64_fetch_or_acquire(s64 i, atomic64 !! 2023 arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
3841 {                                                2024 {
3842 #if defined(arch_atomic64_fetch_or_acquire)   << 
3843         return arch_atomic64_fetch_or_acquire << 
3844 #elif defined(arch_atomic64_fetch_or_relaxed) << 
3845         s64 ret = arch_atomic64_fetch_or_rela    2025         s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
3846         __atomic_acquire_fence();                2026         __atomic_acquire_fence();
3847         return ret;                              2027         return ret;
3848 #elif defined(arch_atomic64_fetch_or)         << 
3849         return arch_atomic64_fetch_or(i, v);  << 
3850 #else                                         << 
3851 #error "Unable to define raw_atomic64_fetch_o << 
3852 #endif                                        << 
3853 }                                                2028 }
                                                   >> 2029 #define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire
                                                   >> 2030 #endif
3854                                                  2031 
3855 /**                                           !! 2032 #ifndef arch_atomic64_fetch_or_release
3856  * raw_atomic64_fetch_or_release() - atomic b << 
3857  * @i: s64 value                              << 
3858  * @v: pointer to atomic64_t                  << 
3859  *                                            << 
3860  * Atomically updates @v to (@v | @i) with re << 
3861  *                                            << 
3862  * Safe to use in noinstr code; prefer atomic << 
3863  *                                            << 
3864  * Return: The original value of @v.          << 
3865  */                                           << 
3866 static __always_inline s64                       2033 static __always_inline s64
3867 raw_atomic64_fetch_or_release(s64 i, atomic64 !! 2034 arch_atomic64_fetch_or_release(s64 i, atomic64_t *v)
3868 {                                                2035 {
3869 #if defined(arch_atomic64_fetch_or_release)   << 
3870         return arch_atomic64_fetch_or_release << 
3871 #elif defined(arch_atomic64_fetch_or_relaxed) << 
3872         __atomic_release_fence();                2036         __atomic_release_fence();
3873         return arch_atomic64_fetch_or_relaxed    2037         return arch_atomic64_fetch_or_relaxed(i, v);
3874 #elif defined(arch_atomic64_fetch_or)         << 
3875         return arch_atomic64_fetch_or(i, v);  << 
3876 #else                                         << 
3877 #error "Unable to define raw_atomic64_fetch_o << 
3878 #endif                                        << 
3879 }                                                2038 }
3880                                               !! 2039 #define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release
3881 /**                                           << 
3882  * raw_atomic64_fetch_or_relaxed() - atomic b << 
3883  * @i: s64 value                              << 
3884  * @v: pointer to atomic64_t                  << 
3885  *                                            << 
3886  * Atomically updates @v to (@v | @i) with re << 
3887  *                                            << 
3888  * Safe to use in noinstr code; prefer atomic << 
3889  *                                            << 
3890  * Return: The original value of @v.          << 
3891  */                                           << 
3892 static __always_inline s64                    << 
3893 raw_atomic64_fetch_or_relaxed(s64 i, atomic64 << 
3894 {                                             << 
3895 #if defined(arch_atomic64_fetch_or_relaxed)   << 
3896         return arch_atomic64_fetch_or_relaxed << 
3897 #elif defined(arch_atomic64_fetch_or)         << 
3898         return arch_atomic64_fetch_or(i, v);  << 
3899 #else                                         << 
3900 #error "Unable to define raw_atomic64_fetch_o << 
3901 #endif                                           2040 #endif
3902 }                                             << 
3903                                               << 
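Usage sketch (hypothetical names, not part of the header): atomic64_fetch_or() works as a 64-bit "test and set" because the returned old value reveals whether the flag was already set before this call.

#include <linux/atomic.h>
#include <linux/bits.h>

#define SHUTDOWN_REQUESTED	BIT_ULL(0)	/* hypothetical flag */

/* True only for the first caller; later callers see the bit already set. */
static bool request_shutdown(atomic64_t *state)
{
	s64 old = atomic64_fetch_or(SHUTDOWN_REQUESTED, state);

	return !(old & SHUTDOWN_REQUESTED);
}
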
3904 /**                                           << 
3905  * raw_atomic64_xor() - atomic bitwise XOR wi << 
3906  * @i: s64 value                              << 
3907  * @v: pointer to atomic64_t                  << 
3908  *                                            << 
3909  * Atomically updates @v to (@v ^ @i) with re << 
3910  *                                            << 
3911  * Safe to use in noinstr code; prefer atomic << 
3912  *                                            << 
3913  * Return: Nothing.                           << 
3914  */                                           << 
3915 static __always_inline void                   << 
3916 raw_atomic64_xor(s64 i, atomic64_t *v)        << 
3917 {                                             << 
3918         arch_atomic64_xor(i, v);              << 
3919 }                                             << 
3920                                                  2041 
3921 /**                                           !! 2042 #ifndef arch_atomic64_fetch_or
3922  * raw_atomic64_fetch_xor() - atomic bitwise  << 
3923  * @i: s64 value                              << 
3924  * @v: pointer to atomic64_t                  << 
3925  *                                            << 
3926  * Atomically updates @v to (@v ^ @i) with fu << 
3927  *                                            << 
3928  * Safe to use in noinstr code; prefer atomic << 
3929  *                                            << 
3930  * Return: The original value of @v.          << 
3931  */                                           << 
3932 static __always_inline s64                       2043 static __always_inline s64
3933 raw_atomic64_fetch_xor(s64 i, atomic64_t *v)  !! 2044 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
3934 {                                                2045 {
3935 #if defined(arch_atomic64_fetch_xor)          << 
3936         return arch_atomic64_fetch_xor(i, v); << 
3937 #elif defined(arch_atomic64_fetch_xor_relaxed << 
3938         s64 ret;                                 2046         s64 ret;
3939         __atomic_pre_full_fence();               2047         __atomic_pre_full_fence();
3940         ret = arch_atomic64_fetch_xor_relaxed !! 2048         ret = arch_atomic64_fetch_or_relaxed(i, v);
3941         __atomic_post_full_fence();              2049         __atomic_post_full_fence();
3942         return ret;                              2050         return ret;
3943 #else                                         << 
3944 #error "Unable to define raw_atomic64_fetch_x << 
3945 #endif                                        << 
3946 }                                                2051 }
                                                   >> 2052 #define arch_atomic64_fetch_or arch_atomic64_fetch_or
                                                   >> 2053 #endif
3947                                                  2054 
3948 /**                                           !! 2055 #endif /* arch_atomic64_fetch_or_relaxed */
3949  * raw_atomic64_fetch_xor_acquire() - atomic  !! 2056 
3950  * @i: s64 value                              !! 2057 #ifndef arch_atomic64_fetch_xor_relaxed
3951  * @v: pointer to atomic64_t                  !! 2058 #define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
3952  *                                            !! 2059 #define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
3953  * Atomically updates @v to (@v ^ @i) with ac !! 2060 #define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor
3954  *                                            !! 2061 #else /* arch_atomic64_fetch_xor_relaxed */
3955  * Safe to use in noinstr code; prefer atomic !! 2062 
3956  *                                            !! 2063 #ifndef arch_atomic64_fetch_xor_acquire
3957  * Return: The original value of @v.          << 
3958  */                                           << 
3959 static __always_inline s64                       2064 static __always_inline s64
3960 raw_atomic64_fetch_xor_acquire(s64 i, atomic6 !! 2065 arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
3961 {                                                2066 {
3962 #if defined(arch_atomic64_fetch_xor_acquire)  << 
3963         return arch_atomic64_fetch_xor_acquir << 
3964 #elif defined(arch_atomic64_fetch_xor_relaxed << 
3965         s64 ret = arch_atomic64_fetch_xor_rel    2067         s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
3966         __atomic_acquire_fence();                2068         __atomic_acquire_fence();
3967         return ret;                              2069         return ret;
3968 #elif defined(arch_atomic64_fetch_xor)        << 
3969         return arch_atomic64_fetch_xor(i, v); << 
3970 #else                                         << 
3971 #error "Unable to define raw_atomic64_fetch_x << 
3972 #endif                                        << 
3973 }                                                2070 }
                                                   >> 2071 #define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire
                                                   >> 2072 #endif
3974                                                  2073 
3975 /**                                           !! 2074 #ifndef arch_atomic64_fetch_xor_release
3976  * raw_atomic64_fetch_xor_release() - atomic  << 
3977  * @i: s64 value                              << 
3978  * @v: pointer to atomic64_t                  << 
3979  *                                            << 
3980  * Atomically updates @v to (@v ^ @i) with re << 
3981  *                                            << 
3982  * Safe to use in noinstr code; prefer atomic << 
3983  *                                            << 
3984  * Return: The original value of @v.          << 
3985  */                                           << 
3986 static __always_inline s64                       2075 static __always_inline s64
3987 raw_atomic64_fetch_xor_release(s64 i, atomic6 !! 2076 arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
3988 {                                                2077 {
3989 #if defined(arch_atomic64_fetch_xor_release)  << 
3990         return arch_atomic64_fetch_xor_releas << 
3991 #elif defined(arch_atomic64_fetch_xor_relaxed << 
3992         __atomic_release_fence();                2078         __atomic_release_fence();
3993         return arch_atomic64_fetch_xor_relaxe    2079         return arch_atomic64_fetch_xor_relaxed(i, v);
3994 #elif defined(arch_atomic64_fetch_xor)        << 
3995         return arch_atomic64_fetch_xor(i, v); << 
3996 #else                                         << 
3997 #error "Unable to define raw_atomic64_fetch_x << 
3998 #endif                                        << 
3999 }                                                2080 }
4000                                               !! 2081 #define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
4001 /**                                           << 
4002  * raw_atomic64_fetch_xor_relaxed() - atomic  << 
4003  * @i: s64 value                              << 
4004  * @v: pointer to atomic64_t                  << 
4005  *                                            << 
4006  * Atomically updates @v to (@v ^ @i) with re << 
4007  *                                            << 
4008  * Safe to use in noinstr code; prefer atomic << 
4009  *                                            << 
4010  * Return: The original value of @v.          << 
4011  */                                           << 
4012 static __always_inline s64                    << 
4013 raw_atomic64_fetch_xor_relaxed(s64 i, atomic6 << 
4014 {                                             << 
4015 #if defined(arch_atomic64_fetch_xor_relaxed)  << 
4016         return arch_atomic64_fetch_xor_relaxe << 
4017 #elif defined(arch_atomic64_fetch_xor)        << 
4018         return arch_atomic64_fetch_xor(i, v); << 
4019 #else                                         << 
4020 #error "Unable to define raw_atomic64_fetch_x << 
4021 #endif                                           2082 #endif
4022 }                                             << 
4023                                                  2083 
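Usage sketch (hypothetical names, not part of the header): fetch_xor() toggles the chosen bits and, via the returned old value, tells the caller which state the toggle left them in.

#include <linux/atomic.h>
#include <linux/bits.h>

#define LED_ON	BIT_ULL(5)	/* hypothetical control bit */

/* Toggle the bit; return true if it is now set. */
static bool led_toggle(atomic64_t *ctrl)
{
	s64 old = atomic64_fetch_xor(LED_ON, ctrl);

	return !(old & LED_ON);
}
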
4024 /**                                           !! 2084 #ifndef arch_atomic64_fetch_xor
4025  * raw_atomic64_xchg() - atomic exchange with << 
4026  * @v: pointer to atomic64_t                  << 
4027  * @new: s64 value to assign                  << 
4028  *                                            << 
4029  * Atomically updates @v to @new with full or << 
4030  *                                            << 
4031  * Safe to use in noinstr code; prefer atomic << 
4032  *                                            << 
4033  * Return: The original value of @v.          << 
4034  */                                           << 
4035 static __always_inline s64                       2085 static __always_inline s64
4036 raw_atomic64_xchg(atomic64_t *v, s64 new)     !! 2086 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
4037 {                                                2087 {
4038 #if defined(arch_atomic64_xchg)               << 
4039         return arch_atomic64_xchg(v, new);    << 
4040 #elif defined(arch_atomic64_xchg_relaxed)     << 
4041         s64 ret;                                 2088         s64 ret;
4042         __atomic_pre_full_fence();               2089         __atomic_pre_full_fence();
4043         ret = arch_atomic64_xchg_relaxed(v, n !! 2090         ret = arch_atomic64_fetch_xor_relaxed(i, v);
4044         __atomic_post_full_fence();              2091         __atomic_post_full_fence();
4045         return ret;                              2092         return ret;
4046 #else                                         << 
4047         return raw_xchg(&v->counter, new);    << 
4048 #endif                                        << 
4049 }                                                2093 }
                                                   >> 2094 #define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
                                                   >> 2095 #endif
4050                                                  2096 
4051 /**                                           !! 2097 #endif /* arch_atomic64_fetch_xor_relaxed */
4052  * raw_atomic64_xchg_acquire() - atomic excha !! 2098 
4053  * @v: pointer to atomic64_t                  !! 2099 #ifndef arch_atomic64_xchg_relaxed
4054  * @new: s64 value to assign                  !! 2100 #define arch_atomic64_xchg_acquire arch_atomic64_xchg
4055  *                                            !! 2101 #define arch_atomic64_xchg_release arch_atomic64_xchg
4056  * Atomically updates @v to @new with acquire !! 2102 #define arch_atomic64_xchg_relaxed arch_atomic64_xchg
4057  *                                            !! 2103 #else /* arch_atomic64_xchg_relaxed */
4058  * Safe to use in noinstr code; prefer atomic !! 2104 
4059  *                                            !! 2105 #ifndef arch_atomic64_xchg_acquire
4060  * Return: The original value of @v.          << 
4061  */                                           << 
4062 static __always_inline s64                       2106 static __always_inline s64
4063 raw_atomic64_xchg_acquire(atomic64_t *v, s64  !! 2107 arch_atomic64_xchg_acquire(atomic64_t *v, s64 i)
4064 {                                                2108 {
4065 #if defined(arch_atomic64_xchg_acquire)       !! 2109         s64 ret = arch_atomic64_xchg_relaxed(v, i);
4066         return arch_atomic64_xchg_acquire(v,  << 
4067 #elif defined(arch_atomic64_xchg_relaxed)     << 
4068         s64 ret = arch_atomic64_xchg_relaxed( << 
4069         __atomic_acquire_fence();                2110         __atomic_acquire_fence();
4070         return ret;                              2111         return ret;
4071 #elif defined(arch_atomic64_xchg)             << 
4072         return arch_atomic64_xchg(v, new);    << 
4073 #else                                         << 
4074         return raw_xchg_acquire(&v->counter,  << 
4075 #endif                                        << 
4076 }                                                2112 }
                                                   >> 2113 #define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire
                                                   >> 2114 #endif
4077                                                  2115 
4078 /**                                           !! 2116 #ifndef arch_atomic64_xchg_release
4079  * raw_atomic64_xchg_release() - atomic excha << 
4080  * @v: pointer to atomic64_t                  << 
4081  * @new: s64 value to assign                  << 
4082  *                                            << 
4083  * Atomically updates @v to @new with release << 
4084  *                                            << 
4085  * Safe to use in noinstr code; prefer atomic << 
4086  *                                            << 
4087  * Return: The original value of @v.          << 
4088  */                                           << 
4089 static __always_inline s64                       2117 static __always_inline s64
4090 raw_atomic64_xchg_release(atomic64_t *v, s64  !! 2118 arch_atomic64_xchg_release(atomic64_t *v, s64 i)
4091 {                                                2119 {
4092 #if defined(arch_atomic64_xchg_release)       << 
4093         return arch_atomic64_xchg_release(v,  << 
4094 #elif defined(arch_atomic64_xchg_relaxed)     << 
4095         __atomic_release_fence();                2120         __atomic_release_fence();
4096         return arch_atomic64_xchg_relaxed(v,  !! 2121         return arch_atomic64_xchg_relaxed(v, i);
4097 #elif defined(arch_atomic64_xchg)             << 
4098         return arch_atomic64_xchg(v, new);    << 
4099 #else                                         << 
4100         return raw_xchg_release(&v->counter,  << 
4101 #endif                                        << 
4102 }                                                2122 }
4103                                               !! 2123 #define arch_atomic64_xchg_release arch_atomic64_xchg_release
4104 /**                                           << 
4105  * raw_atomic64_xchg_relaxed() - atomic excha << 
4106  * @v: pointer to atomic64_t                  << 
4107  * @new: s64 value to assign                  << 
4108  *                                            << 
4109  * Atomically updates @v to @new with relaxed << 
4110  *                                            << 
4111  * Safe to use in noinstr code; prefer atomic << 
4112  *                                            << 
4113  * Return: The original value of @v.          << 
4114  */                                           << 
4115 static __always_inline s64                    << 
4116 raw_atomic64_xchg_relaxed(atomic64_t *v, s64  << 
4117 {                                             << 
4118 #if defined(arch_atomic64_xchg_relaxed)       << 
4119         return arch_atomic64_xchg_relaxed(v,  << 
4120 #elif defined(arch_atomic64_xchg)             << 
4121         return arch_atomic64_xchg(v, new);    << 
4122 #else                                         << 
4123         return raw_xchg_relaxed(&v->counter,  << 
4124 #endif                                           2124 #endif
4125 }                                             << 
4126                                                  2125 
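Usage sketch (hypothetical event mask and handler, not from this file): atomic64_xchg() atomically steals the whole pending mask, so events posted by other CPUs are either consumed here or left intact for the next pass, never lost.

#include <linux/atomic.h>
#include <linux/bitops.h>

/* Hypothetical event-draining loop built on a 64-bit pending mask. */
static void drain_events(atomic64_t *pending)
{
	u64 mask = atomic64_xchg(pending, 0);	/* full ordering */

	while (mask) {
		unsigned long bit = __ffs64(mask);

		mask &= ~(1ULL << bit);
		/* handle_event(bit); - hypothetical per-event handler */
	}
}
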
4127 /**                                           !! 2126 #ifndef arch_atomic64_xchg
4128  * raw_atomic64_cmpxchg() - atomic compare an << 
4129  * @v: pointer to atomic64_t                  << 
4130  * @old: s64 value to compare with            << 
4131  * @new: s64 value to assign                  << 
4132  *                                            << 
4133  * If (@v == @old), atomically updates @v to  << 
4134  * Otherwise, @v is not modified and relaxed  << 
4135  *                                            << 
4136  * Safe to use in noinstr code; prefer atomic << 
4137  *                                            << 
4138  * Return: The original value of @v.          << 
4139  */                                           << 
4140 static __always_inline s64                       2127 static __always_inline s64
4141 raw_atomic64_cmpxchg(atomic64_t *v, s64 old,  !! 2128 arch_atomic64_xchg(atomic64_t *v, s64 i)
4142 {                                                2129 {
4143 #if defined(arch_atomic64_cmpxchg)            << 
4144         return arch_atomic64_cmpxchg(v, old,  << 
4145 #elif defined(arch_atomic64_cmpxchg_relaxed)  << 
4146         s64 ret;                                 2130         s64 ret;
4147         __atomic_pre_full_fence();               2131         __atomic_pre_full_fence();
4148         ret = arch_atomic64_cmpxchg_relaxed(v !! 2132         ret = arch_atomic64_xchg_relaxed(v, i);
4149         __atomic_post_full_fence();              2133         __atomic_post_full_fence();
4150         return ret;                              2134         return ret;
4151 #else                                         << 
4152         return raw_cmpxchg(&v->counter, old,  << 
4153 #endif                                        << 
4154 }                                                2135 }
                                                   >> 2136 #define arch_atomic64_xchg arch_atomic64_xchg
                                                   >> 2137 #endif
4155                                                  2138 
4156 /**                                           !! 2139 #endif /* arch_atomic64_xchg_relaxed */
4157  * raw_atomic64_cmpxchg_acquire() - atomic co !! 2140 
4158  * @v: pointer to atomic64_t                  !! 2141 #ifndef arch_atomic64_cmpxchg_relaxed
4159  * @old: s64 value to compare with            !! 2142 #define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg
4160  * @new: s64 value to assign                  !! 2143 #define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg
4161  *                                            !! 2144 #define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg
4162  * If (@v == @old), atomically updates @v to  !! 2145 #else /* arch_atomic64_cmpxchg_relaxed */
4163  * Otherwise, @v is not modified and relaxed  !! 2146 
4164  *                                            !! 2147 #ifndef arch_atomic64_cmpxchg_acquire
4165  * Safe to use in noinstr code; prefer atomic << 
4166  *                                            << 
4167  * Return: The original value of @v.          << 
4168  */                                           << 
4169 static __always_inline s64                       2148 static __always_inline s64
4170 raw_atomic64_cmpxchg_acquire(atomic64_t *v, s !! 2149 arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
4171 {                                                2150 {
4172 #if defined(arch_atomic64_cmpxchg_acquire)    << 
4173         return arch_atomic64_cmpxchg_acquire( << 
4174 #elif defined(arch_atomic64_cmpxchg_relaxed)  << 
4175         s64 ret = arch_atomic64_cmpxchg_relax    2151         s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
4176         __atomic_acquire_fence();                2152         __atomic_acquire_fence();
4177         return ret;                              2153         return ret;
4178 #elif defined(arch_atomic64_cmpxchg)          << 
4179         return arch_atomic64_cmpxchg(v, old,  << 
4180 #else                                         << 
4181         return raw_cmpxchg_acquire(&v->counte << 
4182 #endif                                        << 
4183 }                                                2154 }
                                                   >> 2155 #define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire
                                                   >> 2156 #endif
4184                                                  2157 
4185 /**                                           !! 2158 #ifndef arch_atomic64_cmpxchg_release
4186  * raw_atomic64_cmpxchg_release() - atomic co << 
4187  * @v: pointer to atomic64_t                  << 
4188  * @old: s64 value to compare with            << 
4189  * @new: s64 value to assign                  << 
4190  *                                            << 
4191  * If (@v == @old), atomically updates @v to  << 
4192  * Otherwise, @v is not modified and relaxed  << 
4193  *                                            << 
4194  * Safe to use in noinstr code; prefer atomic << 
4195  *                                            << 
4196  * Return: The original value of @v.          << 
4197  */                                           << 
4198 static __always_inline s64                       2159 static __always_inline s64
4199 raw_atomic64_cmpxchg_release(atomic64_t *v, s !! 2160 arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
4200 {                                                2161 {
4201 #if defined(arch_atomic64_cmpxchg_release)    << 
4202         return arch_atomic64_cmpxchg_release( << 
4203 #elif defined(arch_atomic64_cmpxchg_relaxed)  << 
4204         __atomic_release_fence();                2162         __atomic_release_fence();
4205         return arch_atomic64_cmpxchg_relaxed(    2163         return arch_atomic64_cmpxchg_relaxed(v, old, new);
4206 #elif defined(arch_atomic64_cmpxchg)          << 
4207         return arch_atomic64_cmpxchg(v, old,  << 
4208 #else                                         << 
4209         return raw_cmpxchg_release(&v->counte << 
4210 #endif                                        << 
4211 }                                                2164 }
                                                   >> 2165 #define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release
                                                   >> 2166 #endif
4212                                                  2167 
4213 /**                                           !! 2168 #ifndef arch_atomic64_cmpxchg
4214  * raw_atomic64_cmpxchg_relaxed() - atomic co << 
4215  * @v: pointer to atomic64_t                  << 
4216  * @old: s64 value to compare with            << 
4217  * @new: s64 value to assign                  << 
4218  *                                            << 
4219  * If (@v == @old), atomically updates @v to  << 
4220  * Otherwise, @v is not modified and relaxed  << 
4221  *                                            << 
4222  * Safe to use in noinstr code; prefer atomic << 
4223  *                                            << 
4224  * Return: The original value of @v.          << 
4225  */                                           << 
4226 static __always_inline s64                       2169 static __always_inline s64
4227 raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s !! 2170 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
4228 {                                                2171 {
4229 #if defined(arch_atomic64_cmpxchg_relaxed)    !! 2172         s64 ret;
4230         return arch_atomic64_cmpxchg_relaxed( !! 2173         __atomic_pre_full_fence();
4231 #elif defined(arch_atomic64_cmpxchg)          !! 2174         ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
4232         return arch_atomic64_cmpxchg(v, old,  !! 2175         __atomic_post_full_fence();
4233 #else                                         !! 2176         return ret;
4234         return raw_cmpxchg_relaxed(&v->counte << 
4235 #endif                                        << 
4236 }                                                2177 }
                                                   >> 2178 #define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
                                                   >> 2179 #endif
4237                                                  2180 
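Usage sketch (hypothetical helper, not part of the header): the classic cmpxchg() retry loop, here incrementing a count only while it is non-zero; the loop re-reads the value returned by the failed cmpxchg and tries again.

#include <linux/atomic.h>

/* Hypothetical "get unless zero" built on a classic cmpxchg retry loop. */
static bool obj_get_unless_zero(atomic64_t *refs)
{
	s64 old = atomic64_read(refs);

	while (old != 0) {
		s64 seen = atomic64_cmpxchg(refs, old, old + 1);

		if (seen == old)
			return true;	/* we installed old + 1 */
		old = seen;		/* raced with someone; retry */
	}
	return false;
}
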
4238 /**                                           !! 2181 #endif /* arch_atomic64_cmpxchg_relaxed */
4239  * raw_atomic64_try_cmpxchg() - atomic compar !! 2182 
4240  * @v: pointer to atomic64_t                  !! 2183 #ifndef arch_atomic64_try_cmpxchg_relaxed
4241  * @old: pointer to s64 value to compare with !! 2184 #ifdef arch_atomic64_try_cmpxchg
4242  * @new: s64 value to assign                  !! 2185 #define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg
4243  *                                            !! 2186 #define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg
4244  * If (@v == @old), atomically updates @v to  !! 2187 #define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg
4245  * Otherwise, @v is not modified, @old is upd !! 2188 #endif /* arch_atomic64_try_cmpxchg */
4246  * and relaxed ordering is provided.          !! 2189 
4247  *                                            !! 2190 #ifndef arch_atomic64_try_cmpxchg
4248  * Safe to use in noinstr code; prefer atomic << 
4249  *                                            << 
4250  * Return: @true if the exchange occurred, @false otherwise.            << 
4251  */                                           << 
4252 static __always_inline bool                      2191 static __always_inline bool
4253 raw_atomic64_try_cmpxchg(atomic64_t *v, s64 * !! 2192 arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
4254 {                                                2193 {
4255 #if defined(arch_atomic64_try_cmpxchg)        << 
4256         return arch_atomic64_try_cmpxchg(v, o << 
4257 #elif defined(arch_atomic64_try_cmpxchg_relax << 
4258         bool ret;                             << 
4259         __atomic_pre_full_fence();            << 
4260         ret = arch_atomic64_try_cmpxchg_relax << 
4261         __atomic_post_full_fence();           << 
4262         return ret;                           << 
4263 #else                                         << 
4264         s64 r, o = *old;                         2194         s64 r, o = *old;
4265         r = raw_atomic64_cmpxchg(v, o, new);  !! 2195         r = arch_atomic64_cmpxchg(v, o, new);
4266         if (unlikely(r != o))                    2196         if (unlikely(r != o))
4267                 *old = r;                        2197                 *old = r;
4268         return likely(r == o);                   2198         return likely(r == o);
4269 #endif                                        << 
4270 }                                                2199 }
                                                   >> 2200 #define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
                                                   >> 2201 #endif
4271                                                  2202 
4272 /**                                           !! 2203 #ifndef arch_atomic64_try_cmpxchg_acquire
4273  * raw_atomic64_try_cmpxchg_acquire() - atomi << 
4274  * @v: pointer to atomic64_t                  << 
4275  * @old: pointer to s64 value to compare with << 
4276  * @new: s64 value to assign                  << 
4277  *                                            << 
4278  * If (@v == @old), atomically updates @v to  << 
4279  * Otherwise, @v is not modified, @old is upd << 
4280  * and relaxed ordering is provided.          << 
4281  *                                            << 
4282  * Safe to use in noinstr code; prefer atomic << 
4283  *                                            << 
4284  * Return: @true if the exchange occurred, @false otherwise.            << 
4285  */                                           << 
4286 static __always_inline bool                      2204 static __always_inline bool
4287 raw_atomic64_try_cmpxchg_acquire(atomic64_t * !! 2205 arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
4288 {                                                2206 {
4289 #if defined(arch_atomic64_try_cmpxchg_acquire << 
4290         return arch_atomic64_try_cmpxchg_acqu << 
4291 #elif defined(arch_atomic64_try_cmpxchg_relax << 
4292         bool ret = arch_atomic64_try_cmpxchg_ << 
4293         __atomic_acquire_fence();             << 
4294         return ret;                           << 
4295 #elif defined(arch_atomic64_try_cmpxchg)      << 
4296         return arch_atomic64_try_cmpxchg(v, o << 
4297 #else                                         << 
4298         s64 r, o = *old;                         2207         s64 r, o = *old;
4299         r = raw_atomic64_cmpxchg_acquire(v, o !! 2208         r = arch_atomic64_cmpxchg_acquire(v, o, new);
4300         if (unlikely(r != o))                    2209         if (unlikely(r != o))
4301                 *old = r;                        2210                 *old = r;
4302         return likely(r == o);                   2211         return likely(r == o);
4303 #endif                                        << 
4304 }                                                2212 }
                                                   >> 2213 #define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
                                                   >> 2214 #endif
4305                                                  2215 
4306 /**                                           !! 2216 #ifndef arch_atomic64_try_cmpxchg_release
4307  * raw_atomic64_try_cmpxchg_release() - atomi << 
4308  * @v: pointer to atomic64_t                  << 
4309  * @old: pointer to s64 value to compare with << 
4310  * @new: s64 value to assign                  << 
4311  *                                            << 
4312  * If (@v == @old), atomically updates @v to  << 
4313  * Otherwise, @v is not modified, @old is upd << 
4314  * and relaxed ordering is provided.          << 
4315  *                                            << 
4316  * Safe to use in noinstr code; prefer atomic << 
4317  *                                            << 
4318  * Return: @true if the exchange occurred, @false otherwise.            << 
4319  */                                           << 
4320 static __always_inline bool                      2217 static __always_inline bool
4321 raw_atomic64_try_cmpxchg_release(atomic64_t * !! 2218 arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
4322 {                                                2219 {
4323 #if defined(arch_atomic64_try_cmpxchg_release << 
4324         return arch_atomic64_try_cmpxchg_rele << 
4325 #elif defined(arch_atomic64_try_cmpxchg_relax << 
4326         __atomic_release_fence();             << 
4327         return arch_atomic64_try_cmpxchg_rela << 
4328 #elif defined(arch_atomic64_try_cmpxchg)      << 
4329         return arch_atomic64_try_cmpxchg(v, o << 
4330 #else                                         << 
4331         s64 r, o = *old;                         2220         s64 r, o = *old;
4332         r = raw_atomic64_cmpxchg_release(v, o !! 2221         r = arch_atomic64_cmpxchg_release(v, o, new);
4333         if (unlikely(r != o))                    2222         if (unlikely(r != o))
4334                 *old = r;                        2223                 *old = r;
4335         return likely(r == o);                   2224         return likely(r == o);
4336 #endif                                        << 
4337 }                                                2225 }
                                                   >> 2226 #define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
                                                   >> 2227 #endif
4338                                                  2228 
4339 /**                                           !! 2229 #ifndef arch_atomic64_try_cmpxchg_relaxed
4340  * raw_atomic64_try_cmpxchg_relaxed() - atomi << 
4341  * @v: pointer to atomic64_t                  << 
4342  * @old: pointer to s64 value to compare with << 
4343  * @new: s64 value to assign                  << 
4344  *                                            << 
4345  * If (@v == @old), atomically updates @v to  << 
4346  * Otherwise, @v is not modified, @old is upd << 
4347  * and relaxed ordering is provided.          << 
4348  *                                            << 
4349  * Safe to use in noinstr code; prefer atomic << 
4350  *                                            << 
4351  * Return: @true if the exchange occurred, @false otherwise.            << 
4352  */                                           << 
4353 static __always_inline bool                      2230 static __always_inline bool
4354 raw_atomic64_try_cmpxchg_relaxed(atomic64_t * !! 2231 arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
4355 {                                                2232 {
4356 #if defined(arch_atomic64_try_cmpxchg_relaxed << 
4357         return arch_atomic64_try_cmpxchg_rela << 
4358 #elif defined(arch_atomic64_try_cmpxchg)      << 
4359         return arch_atomic64_try_cmpxchg(v, o << 
4360 #else                                         << 
4361         s64 r, o = *old;                         2233         s64 r, o = *old;
4362         r = raw_atomic64_cmpxchg_relaxed(v, o !! 2234         r = arch_atomic64_cmpxchg_relaxed(v, o, new);
4363         if (unlikely(r != o))                    2235         if (unlikely(r != o))
4364                 *old = r;                        2236                 *old = r;
4365         return likely(r == o);                   2237         return likely(r == o);
4366 #endif                                        << 
4367 }                                                2238 }
                                                   >> 2239 #define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed
                                                   >> 2240 #endif
4368                                                  2241 
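Usage sketch (hypothetical helper): the same loop written with try_cmpxchg(), which refreshes @old in place on failure - exactly the behaviour the cmpxchg-based fallback above emulates with "if (unlikely(r != o)) *old = r;".

#include <linux/atomic.h>

/* The same hypothetical "get unless zero", written with try_cmpxchg(). */
static bool obj_get_unless_zero_try(atomic64_t *refs)
{
	s64 old = atomic64_read(refs);

	do {
		if (old == 0)
			return false;
		/* on failure, @old is refreshed with the current value */
	} while (!atomic64_try_cmpxchg(refs, &old, old + 1));

	return true;
}
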
4369 /**                                           !! 2242 #else /* arch_atomic64_try_cmpxchg_relaxed */
4370  * raw_atomic64_sub_and_test() - atomic subtr !! 2243 
4371  * @i: s64 value to subtract                  !! 2244 #ifndef arch_atomic64_try_cmpxchg_acquire
4372  * @v: pointer to atomic64_t                  << 
4373  *                                            << 
4374  * Atomically updates @v to (@v - @i) with fu << 
4375  *                                            << 
4376  * Safe to use in noinstr code; prefer atomic << 
4377  *                                            << 
4378  * Return: @true if the resulting value of @v << 
4379  */                                           << 
4380 static __always_inline bool                      2245 static __always_inline bool
4381 raw_atomic64_sub_and_test(s64 i, atomic64_t * !! 2246 arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
4382 {                                                2247 {
4383 #if defined(arch_atomic64_sub_and_test)       !! 2248         bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
4384         return arch_atomic64_sub_and_test(i,  !! 2249         __atomic_acquire_fence();
4385 #else                                         !! 2250         return ret;
4386         return raw_atomic64_sub_return(i, v)  << 
4387 #endif                                        << 
4388 }                                                2251 }
                                                   >> 2252 #define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
                                                   >> 2253 #endif
4389                                                  2254 
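Usage sketch (hypothetical names, not from this file): sub_and_test() drops several references in one atomic step and returns true only for the caller whose subtraction made the counter reach zero.

#include <linux/atomic.h>

/* Drop @n references at once; only the caller that reaches zero cleans up. */
static void obj_put_many(atomic64_t *refcount, s64 n)
{
	if (atomic64_sub_and_test(n, refcount)) {
		/* release_object(); - hypothetical teardown */
	}
}
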
4390 /**                                           !! 2255 #ifndef arch_atomic64_try_cmpxchg_release
4391  * raw_atomic64_dec_and_test() - atomic decre << 
4392  * @v: pointer to atomic64_t                  << 
4393  *                                            << 
4394  * Atomically updates @v to (@v - 1) with ful << 
4395  *                                            << 
4396  * Safe to use in noinstr code; prefer atomic << 
4397  *                                            << 
4398  * Return: @true if the resulting value of @v << 
4399  */                                           << 
4400 static __always_inline bool                      2256 static __always_inline bool
4401 raw_atomic64_dec_and_test(atomic64_t *v)      !! 2257 arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
4402 {                                                2258 {
4403 #if defined(arch_atomic64_dec_and_test)       !! 2259         __atomic_release_fence();
4404         return arch_atomic64_dec_and_test(v); !! 2260         return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
4405 #else                                         << 
4406         return raw_atomic64_dec_return(v) ==  << 
4407 #endif                                        << 
4408 }                                                2261 }
                                                   >> 2262 #define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
                                                   >> 2263 #endif
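raw_atomic64_dec_and_test() above is the usual building block for release-side teardown: exactly one caller observes the counter reaching zero and performs the cleanup, and the full ordering makes earlier accesses to the object visible before it is freed. A hedged sketch (struct and helper names are hypothetical; real reference counts should normally use refcount_t):

struct example_obj {
        atomic64_t refs;
        /* ... payload ... */
};

/* Drop one reference; the caller that drops the last one runs @release. */
static inline void example_obj_put(struct example_obj *obj,
                                   void (*release)(struct example_obj *))
{
        if (raw_atomic64_dec_and_test(&obj->refs))
                release(obj);
}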
4409                                                  2264 
4410 /**                                           !! 2265 #ifndef arch_atomic64_try_cmpxchg
4411  * raw_atomic64_inc_and_test() - atomic increment and test if zero with full ordering  << 
4412  * @v: pointer to atomic64_t                  << 
4413  *                                            << 
4414  * Atomically updates @v to (@v + 1) with full ordering.  << 
4415  *                                            << 
4416  * Safe to use in noinstr code; prefer atomic64_inc_and_test() elsewhere.  << 
4417  *                                            << 
4418  * Return: @true if the resulting value of @v is zero, @false otherwise.  << 
4419  */                                           << 
4420 static __always_inline bool                      2266 static __always_inline bool
4421 raw_atomic64_inc_and_test(atomic64_t *v)      !! 2267 arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
4422 {                                                2268 {
4423 #if defined(arch_atomic64_inc_and_test)       !! 2269         bool ret;
4424         return arch_atomic64_inc_and_test(v); !! 2270         __atomic_pre_full_fence();
4425 #else                                         !! 2271         ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
4426         return raw_atomic64_inc_return(v) == 0;  !! 2272         __atomic_post_full_fence();
4427 #endif                                        !! 2273         return ret;
4428 }                                                2274 }
                                                   >> 2275 #define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
                                                   >> 2276 #endif
                                                   >> 2277 
                                                   >> 2278 #endif /* arch_atomic64_try_cmpxchg_relaxed */
4429                                                  2279 
                                                   >> 2280 #ifndef arch_atomic64_sub_and_test
4430 /**                                              2281 /**
4431  * raw_atomic64_add_negative() - atomic add and test if negative with full ordering  !! 2282  * arch_atomic64_sub_and_test - subtract value from variable and test result
4432  * @i: s64 value to add                       !! 2283  * @i: integer value to subtract
4433  * @v: pointer to atomic64_t                  !! 2284  * @v: pointer of type atomic64_t
4434  *                                            << 
4435  * Atomically updates @v to (@v + @i) with full ordering.  << 
4436  *                                               2285  *
4437  * Safe to use in noinstr code; prefer atomic64_add_negative() elsewhere.  !! 2286  * Atomically subtracts @i from @v and returns
4438  *                                            !! 2287  * true if the result is zero, or false for all
4439  * Return: @true if the resulting value of @v is negative, @false otherwise.  !! 2288  * other cases.
4440  */                                              2289  */
4441 static __always_inline bool                      2290 static __always_inline bool
4442 raw_atomic64_add_negative(s64 i, atomic64_t *v)  !! 2291 arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
4443 {                                                2292 {
4444 #if defined(arch_atomic64_add_negative)       !! 2293         return arch_atomic64_sub_return(i, v) == 0;
4445         return arch_atomic64_add_negative(i, v);  << 
4446 #elif defined(arch_atomic64_add_negative_relaxed)  << 
4447         bool ret;                             << 
4448         __atomic_pre_full_fence();            << 
4449         ret = arch_atomic64_add_negative_relaxed(i, v);  << 
4450         __atomic_post_full_fence();           << 
4451         return ret;                           << 
4452 #else                                         << 
4453         return raw_atomic64_add_return(i, v) < 0;  << 
4454 #endif                                        << 
4455 }                                                2294 }
                                                   >> 2295 #define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
                                                   >> 2296 #endif
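raw_atomic64_add_negative() reports whether the updated value dropped below zero, which fits signed budget or balance style counters. An illustrative sketch only (the helper is hypothetical, not a kernel API):

/* Charge @amount against a signed budget; returns true if the budget is now overdrawn. */
static inline bool example_budget_charge(atomic64_t *budget, s64 amount)
{
        return raw_atomic64_add_negative(-amount, budget);
}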
4456                                                  2297 
                                                   >> 2298 #ifndef arch_atomic64_dec_and_test
4457 /**                                              2299 /**
4458  * raw_atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering  !! 2300  * arch_atomic64_dec_and_test - decrement and test
4459  * @i: s64 value to add                       !! 2301  * @v: pointer of type atomic64_t
4460  * @v: pointer to atomic64_t                  << 
4461  *                                               2302  *
4462  * Atomically updates @v to (@v + @i) with acquire ordering.  !! 2303  * Atomically decrements @v by 1 and
4463  *                                            !! 2304  * returns true if the result is 0, or false for all other
4464  * Safe to use in noinstr code; prefer atomic64_add_negative_acquire() elsewhere.  !! 2305  * cases.
4465  *                                            << 
4466  * Return: @true if the resulting value of @v is negative, @false otherwise.  << 
4467  */                                              2306  */
4468 static __always_inline bool                      2307 static __always_inline bool
4469 raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)  !! 2308 arch_atomic64_dec_and_test(atomic64_t *v)
4470 {                                                2309 {
4471 #if defined(arch_atomic64_add_negative_acquire)  !! 2310         return arch_atomic64_dec_return(v) == 0;
4472         return arch_atomic64_add_negative_acquire(i, v);  << 
4473 #elif defined(arch_atomic64_add_negative_relaxed)  << 
4474         bool ret = arch_atomic64_add_negative_relaxed(i, v);  << 
4475         __atomic_acquire_fence();             << 
4476         return ret;                           << 
4477 #elif defined(arch_atomic64_add_negative)     << 
4478         return arch_atomic64_add_negative(i, v);  << 
4479 #else                                         << 
4480         return raw_atomic64_add_return_acquire(i, v) < 0;  << 
4481 #endif                                        << 
4482 }                                                2311 }
                                                   >> 2312 #define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
                                                   >> 2313 #endif
4483                                                  2314 
                                                   >> 2315 #ifndef arch_atomic64_inc_and_test
4484 /**                                              2316 /**
4485  * raw_atomic64_add_negative_release() - atomic add and test if negative with release ordering  !! 2317  * arch_atomic64_inc_and_test - increment and test
4486  * @i: s64 value to add                       !! 2318  * @v: pointer of type atomic64_t
4487  * @v: pointer to atomic64_t                  << 
4488  *                                               2319  *
4489  * Atomically updates @v to (@v + @i) with release ordering.  !! 2320  * Atomically increments @v by 1
4490  *                                            !! 2321  * and returns true if the result is zero, or false for all
4491  * Safe to use in noinstr code; prefer atomic64_add_negative_release() elsewhere.  !! 2322  * other cases.
4492  *                                            << 
4493  * Return: @true if the resulting value of @v is negative, @false otherwise.  << 
4494  */                                              2323  */
4495 static __always_inline bool                      2324 static __always_inline bool
4496 raw_atomic64_add_negative_release(s64 i, atomic64_t *v)  !! 2325 arch_atomic64_inc_and_test(atomic64_t *v)
4497 {                                                2326 {
4498 #if defined(arch_atomic64_add_negative_release)  !! 2327         return arch_atomic64_inc_return(v) == 0;
4499         return arch_atomic64_add_negative_release(i, v);  << 
4500 #elif defined(arch_atomic64_add_negative_relaxed)  << 
4501         __atomic_release_fence();             << 
4502         return arch_atomic64_add_negative_relaxed(i, v);  << 
4503 #elif defined(arch_atomic64_add_negative)     << 
4504         return arch_atomic64_add_negative(i, v);  << 
4505 #else                                         << 
4506         return raw_atomic64_add_return_release(i, v) < 0;  << 
4507 #endif                                        << 
4508 }                                                2328 }
                                                   >> 2329 #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
                                                   >> 2330 #endif
4509                                                  2331 
                                                   >> 2332 #ifndef arch_atomic64_add_negative
4510 /**                                              2333 /**
4511  * raw_atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering  !! 2334  * arch_atomic64_add_negative - add and test if negative
4512  * @i: s64 value to add                       !! 2335  * @i: integer value to add
4513  * @v: pointer to atomic64_t                  !! 2336  * @v: pointer of type atomic64_t
4514  *                                            << 
4515  * Atomically updates @v to (@v + @i) with relaxed ordering.  << 
4516  *                                            << 
4517  * Safe to use in noinstr code; prefer atomic64_add_negative_relaxed() elsewhere.  << 
4518  *                                               2337  *
4519  * Return: @true if the resulting value of @v is negative, @false otherwise.  !! 2338  * Atomically adds @i to @v and returns true
                                                   >> 2339  * if the result is negative, or false when
                                                   >> 2340  * result is greater than or equal to zero.
4520  */                                              2341  */
4521 static __always_inline bool                      2342 static __always_inline bool
4522 raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)  !! 2343 arch_atomic64_add_negative(s64 i, atomic64_t *v)
4523 {                                                2344 {
4524 #if defined(arch_atomic64_add_negative_relaxed)  !! 2345         return arch_atomic64_add_return(i, v) < 0;
4525         return arch_atomic64_add_negative_relaxed(i, v);  << 
4526 #elif defined(arch_atomic64_add_negative)     << 
4527         return arch_atomic64_add_negative(i, v);  << 
4528 #else                                         << 
4529         return raw_atomic64_add_return_relaxed(i, v) < 0;  << 
4530 #endif                                        << 
4531 }                                                2346 }
                                                   >> 2347 #define arch_atomic64_add_negative arch_atomic64_add_negative
                                                   >> 2348 #endif
4532                                                  2349 
                                                   >> 2350 #ifndef arch_atomic64_fetch_add_unless
4533 /**                                              2351 /**
4534  * raw_atomic64_fetch_add_unless() - atomic add unless value with full ordering  !! 2352  * arch_atomic64_fetch_add_unless - add unless the number is already a given value
4535  * @v: pointer to atomic64_t                  !! 2353  * @v: pointer of type atomic64_t
4536  * @a: s64 value to add                       !! 2354  * @a: the amount to add to v...
4537  * @u: s64 value to compare with              !! 2355  * @u: ...unless v is equal to u.
4538  *                                            << 
4539  * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.  << 
4540  * Otherwise, @v is not modified and relaxed ordering is provided.  << 
4541  *                                               2356  *
4542  * Safe to use in noinstr code; prefer atomic64_fetch_add_unless() elsewhere.  !! 2357  * Atomically adds @a to @v, so long as @v was not already @u.
4543  *                                            !! 2358  * Returns original value of @v
4544  * Return: The original value of @v.          << 
4545  */                                              2359  */
4546 static __always_inline s64                       2360 static __always_inline s64
4547 raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)  !! 2361 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
4548 {                                                2362 {
4549 #if defined(arch_atomic64_fetch_add_unless)   !! 2363         s64 c = arch_atomic64_read(v);
4550         return arch_atomic64_fetch_add_unless(v, a, u);  << 
4551 #else                                         << 
4552         s64 c = raw_atomic64_read(v);         << 
4553                                                  2364 
4554         do {                                     2365         do {
4555                 if (unlikely(c == u))            2366                 if (unlikely(c == u))
4556                         break;                   2367                         break;
4557         } while (!raw_atomic64_try_cmpxchg(v, &c, c + a));  !! 2368         } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
4558                                                  2369 
4559         return c;                                2370         return c;
4560 #endif                                        << 
4561 }                                                2371 }
                                                   >> 2372 #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
                                                   >> 2373 #endif
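raw_atomic64_fetch_add_unless() only performs the add when the current value differs from @u and hands back the value it observed, so the caller can tell whether the add actually happened. A hedged sketch using a hypothetical saturation sentinel (assumes S64_MAX from <linux/limits.h>; names are illustrative, not kernel API):

#define EXAMPLE_COUNTER_SATURATED       S64_MAX

/* Add @delta unless the counter already holds the sentinel; returns true if the add was done. */
static inline bool example_counter_add(atomic64_t *counter, s64 delta)
{
        return raw_atomic64_fetch_add_unless(counter, delta,
                                             EXAMPLE_COUNTER_SATURATED) != EXAMPLE_COUNTER_SATURATED;
}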
4562                                                  2374 
                                                   >> 2375 #ifndef arch_atomic64_add_unless
4563 /**                                              2376 /**
4564  * raw_atomic64_add_unless() - atomic add unless value with full ordering  !! 2377  * arch_atomic64_add_unless - add unless the number is already a given value
4565  * @v: pointer to atomic64_t                  !! 2378  * @v: pointer of type atomic64_t
4566  * @a: s64 value to add                       !! 2379  * @a: the amount to add to v...
4567  * @u: s64 value to compare with              !! 2380  * @u: ...unless v is equal to u.
4568  *                                               2381  *
4569  * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.  !! 2382  * Atomically adds @a to @v, if @v was not already @u.
4570  * Otherwise, @v is not modified and relaxed ordering is provided.  !! 2383  * Returns true if the addition was done.
4571  *                                            << 
4572  * Safe to use in noinstr code; prefer atomic64_add_unless() elsewhere.  << 
4573  *                                            << 
4574  * Return: @true if @v was updated, @false otherwise.  << 
4575  */                                              2384  */
4576 static __always_inline bool                      2385 static __always_inline bool
4577 raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)  !! 2386 arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
4578 {                                                2387 {
4579 #if defined(arch_atomic64_add_unless)         !! 2388         return arch_atomic64_fetch_add_unless(v, a, u) != u;
4580         return arch_atomic64_add_unless(v, a, u);  << 
4581 #else                                         << 
4582         return raw_atomic64_fetch_add_unless(v, a, u) != u;  << 
4583 #endif                                        << 
4584 }                                                2389 }
                                                   >> 2390 #define arch_atomic64_add_unless arch_atomic64_add_unless
                                                   >> 2391 #endif
4585                                                  2392 
                                                   >> 2393 #ifndef arch_atomic64_inc_not_zero
4586 /**                                              2394 /**
4587  * raw_atomic64_inc_not_zero() - atomic increment unless zero with full ordering  !! 2395  * arch_atomic64_inc_not_zero - increment unless the number is zero
4588  * @v: pointer to atomic64_t                  !! 2396  * @v: pointer of type atomic64_t
4589  *                                            << 
4590  * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.  << 
4591  * Otherwise, @v is not modified and relaxed ordering is provided.  << 
4592  *                                               2397  *
4593  * Safe to use in noinstr code; prefer atomic64_inc_not_zero() elsewhere.  !! 2398  * Atomically increments @v by 1, if @v is non-zero.
4594  *                                            !! 2399  * Returns true if the increment was done.
4595  * Return: @true if @v was updated, @false otherwise.  << 
4596  */                                              2400  */
4597 static __always_inline bool                      2401 static __always_inline bool
4598 raw_atomic64_inc_not_zero(atomic64_t *v)      !! 2402 arch_atomic64_inc_not_zero(atomic64_t *v)
4599 {                                                2403 {
4600 #if defined(arch_atomic64_inc_not_zero)       !! 2404         return arch_atomic64_add_unless(v, 1, 0);
4601         return arch_atomic64_inc_not_zero(v); << 
4602 #else                                         << 
4603         return raw_atomic64_add_unless(v, 1, 0);  << 
4604 #endif                                        << 
4605 }                                                2405 }
                                                   >> 2406 #define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
                                                   >> 2407 #endif
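raw_atomic64_inc_not_zero() is the classic "take a reference only if the object is still live" primitive used on lookup paths: once the count has reached zero the object is being torn down and must not be revived. A hedged sketch (hypothetical helper name, assuming <linux/atomic.h>):

/* Returns true if a reference was taken, false if the count was already zero. */
static inline bool example_tryget(atomic64_t *refs)
{
        return raw_atomic64_inc_not_zero(refs);
}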
4606                                                  2408 
4607 /**                                           !! 2409 #ifndef arch_atomic64_inc_unless_negative
4608  * raw_atomic64_inc_unless_negative() - atomic increment unless negative with full ordering  << 
4609  * @v: pointer to atomic64_t                  << 
4610  *                                            << 
4611  * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.  << 
4612  * Otherwise, @v is not modified and relaxed ordering is provided.  << 
4613  *                                            << 
4614  * Safe to use in noinstr code; prefer atomic64_inc_unless_negative() elsewhere.  << 
4615  *                                            << 
4616  * Return: @true if @v was updated, @false otherwise.  << 
4617  */                                           << 
4618 static __always_inline bool                      2410 static __always_inline bool
4619 raw_atomic64_inc_unless_negative(atomic64_t *v)  !! 2411 arch_atomic64_inc_unless_negative(atomic64_t *v)
4620 {                                                2412 {
4621 #if defined(arch_atomic64_inc_unless_negative)  !! 2413         s64 c = arch_atomic64_read(v);
4622         return arch_atomic64_inc_unless_negative(v);  << 
4623 #else                                         << 
4624         s64 c = raw_atomic64_read(v);         << 
4625                                                  2414 
4626         do {                                     2415         do {
4627                 if (unlikely(c < 0))             2416                 if (unlikely(c < 0))
4628                         return false;            2417                         return false;
4629         } while (!raw_atomic64_try_cmpxchg(v, &c, c + 1));  !! 2418         } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1));
4630                                                  2419 
4631         return true;                             2420         return true;
4632 #endif                                        << 
4633 }                                                2421 }
                                                   >> 2422 #define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
                                                   >> 2423 #endif
4634                                                  2424 
4635 /**                                           !! 2425 #ifndef arch_atomic64_dec_unless_positive
4636  * raw_atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering  << 
4637  * @v: pointer to atomic64_t                  << 
4638  *                                            << 
4639  * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.  << 
4640  * Otherwise, @v is not modified and relaxed ordering is provided.  << 
4641  *                                            << 
4642  * Safe to use in noinstr code; prefer atomic64_dec_unless_positive() elsewhere.  << 
4643  *                                            << 
4644  * Return: @true if @v was updated, @false otherwise.  << 
4645  */                                           << 
4646 static __always_inline bool                      2426 static __always_inline bool
4647 raw_atomic64_dec_unless_positive(atomic64_t *v)  !! 2427 arch_atomic64_dec_unless_positive(atomic64_t *v)
4648 {                                                2428 {
4649 #if defined(arch_atomic64_dec_unless_positive)  !! 2429         s64 c = arch_atomic64_read(v);
4650         return arch_atomic64_dec_unless_positive(v);  << 
4651 #else                                         << 
4652         s64 c = raw_atomic64_read(v);         << 
4653                                                  2430 
4654         do {                                     2431         do {
4655                 if (unlikely(c > 0))             2432                 if (unlikely(c > 0))
4656                         return false;            2433                         return false;
4657         } while (!raw_atomic64_try_cmpxchg(v, &c, c - 1));  !! 2434         } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1));
4658                                                  2435 
4659         return true;                             2436         return true;
4660 #endif                                        << 
4661 }                                                2437 }
                                                   >> 2438 #define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
                                                   >> 2439 #endif
4662                                                  2440 
4663 /**                                           !! 2441 #ifndef arch_atomic64_dec_if_positive
4664  * raw_atomic64_dec_if_positive() - atomic decrement if positive with full ordering  << 
4665  * @v: pointer to atomic64_t                  << 
4666  *                                            << 
4667  * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.  << 
4668  * Otherwise, @v is not modified and relaxed ordering is provided.  << 
4669  *                                            << 
4670  * Safe to use in noinstr code; prefer atomic64_dec_if_positive() elsewhere.  << 
4671  *                                            << 
4672  * Return: The old value of (@v - 1), regardless of whether @v was updated.  << 
4673  */                                           << 
4674 static __always_inline s64                       2442 static __always_inline s64
4675 raw_atomic64_dec_if_positive(atomic64_t *v)   !! 2443 arch_atomic64_dec_if_positive(atomic64_t *v)
4676 {                                                2444 {
4677 #if defined(arch_atomic64_dec_if_positive)    !! 2445         s64 dec, c = arch_atomic64_read(v);
4678         return arch_atomic64_dec_if_positive(v);  << 
4679 #else                                         << 
4680         s64 dec, c = raw_atomic64_read(v);    << 
4681                                                  2446 
4682         do {                                     2447         do {
4683                 dec = c - 1;                     2448                 dec = c - 1;
4684                 if (unlikely(dec < 0))           2449                 if (unlikely(dec < 0))
4685                         break;                   2450                         break;
4686         } while (!raw_atomic64_try_cmpxchg(v, &c, dec));  !! 2451         } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
4687                                                  2452 
4688         return dec;                              2453         return dec;
4689 #endif                                        << 
4690 }                                                2454 }
                                                   >> 2455 #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
                                                   >> 2456 #endif
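raw_atomic64_dec_if_positive() returns the would-be decremented value whether or not the store happened, so a negative return means nothing was consumed. A hedged sketch of a credit/token counter (hypothetical helper name, illustrative only):

/* Try to consume one credit; returns true on success, false if none were left. */
static inline bool example_take_credit(atomic64_t *credits)
{
        return raw_atomic64_dec_if_positive(credits) >= 0;
}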
4691                                                  2457 
4692 #endif /* _LINUX_ATOMIC_FALLBACK_H */            2458 #endif /* _LINUX_ATOMIC_FALLBACK_H */
4693 // b565db590afeeff0d7c9485ccbca5bb6e155749f   !! 2459 // b5e87bdd5ede61470c29f7a7e4de781af3770f09
4694                                                  2460 
