
TOMOYO Linux Cross Reference
Linux/include/linux/atomic/atomic-arch-fallback.h


Diff markup

Differences between /include/linux/atomic/atomic-arch-fallback.h (Version linux-6.12-rc7) and /include/linux/atomic/atomic-arch-fallback.h (Version linux-6.4.16)
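Both versions of the header are generated by scripts/atomic/gen-atomic-fallback.sh; the newer one emits raw_* wrappers that select the strongest arch_* operation an architecture provides, while the older one filled in missing arch_* variants directly. The underlying idea is the same in both: when an architecture only supplies a _relaxed primitive, the acquire, release and fully ordered forms are synthesised by adding explicit barriers around it. As a rough illustration only (a userspace analogue built on the GCC/Clang __atomic builtins, not the kernel's __atomic_op_* helpers), the pattern looks like this:

/* Illustration only: userspace analogue of the ordering-variant fallback. */
#include <stdio.h>

/* Assume the "architecture" only gives us a relaxed exchange. */
static inline int xchg_relaxed(int *p, int newval)
{
        return __atomic_exchange_n(p, newval, __ATOMIC_RELAXED);
}

/* Acquire variant: relaxed op first, then an acquire fence. */
static inline int xchg_acquire(int *p, int newval)
{
        int old = xchg_relaxed(p, newval);

        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        return old;
}

/* Release variant: release fence first, then the relaxed op. */
static inline int xchg_release(int *p, int newval)
{
        __atomic_thread_fence(__ATOMIC_RELEASE);
        return xchg_relaxed(p, newval);
}

int main(void)
{
        int v = 1;
        int old = xchg_acquire(&v, 2);

        printf("old=%d new=%d\n", old, v);      /* old=1 new=2 */
        return 0;
}

The fence placement mirrors the fallbacks in the diff below: the acquire fence follows the operation, the release fence precedes it.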


  1 // SPDX-License-Identifier: GPL-2.0                 1 // SPDX-License-Identifier: GPL-2.0
  2                                                     2 
  3 // Generated by scripts/atomic/gen-atomic-fall      3 // Generated by scripts/atomic/gen-atomic-fallback.sh
  4 // DO NOT MODIFY THIS FILE DIRECTLY                 4 // DO NOT MODIFY THIS FILE DIRECTLY
  5                                                     5 
  6 #ifndef _LINUX_ATOMIC_FALLBACK_H                    6 #ifndef _LINUX_ATOMIC_FALLBACK_H
  7 #define _LINUX_ATOMIC_FALLBACK_H                    7 #define _LINUX_ATOMIC_FALLBACK_H
  8                                                     8 
  9 #include <linux/compiler.h>                         9 #include <linux/compiler.h>
 10                                                    10 
 11 #if defined(arch_xchg)                         !!  11 #ifndef arch_xchg_relaxed
 12 #define raw_xchg arch_xchg                     !!  12 #define arch_xchg_acquire arch_xchg
 13 #elif defined(arch_xchg_relaxed)               !!  13 #define arch_xchg_release arch_xchg
 14 #define raw_xchg(...) \                        !!  14 #define arch_xchg_relaxed arch_xchg
 15         __atomic_op_fence(arch_xchg, __VA_ARGS !!  15 #else /* arch_xchg_relaxed */
 16 #else                                          << 
 17 extern void raw_xchg_not_implemented(void);    << 
 18 #define raw_xchg(...) raw_xchg_not_implemented << 
 19 #endif                                         << 
 20                                                    16 
 21 #if defined(arch_xchg_acquire)                 !!  17 #ifndef arch_xchg_acquire
 22 #define raw_xchg_acquire arch_xchg_acquire     !!  18 #define arch_xchg_acquire(...) \
 23 #elif defined(arch_xchg_relaxed)               << 
 24 #define raw_xchg_acquire(...) \                << 
 25         __atomic_op_acquire(arch_xchg, __VA_AR     19         __atomic_op_acquire(arch_xchg, __VA_ARGS__)
 26 #elif defined(arch_xchg)                       << 
 27 #define raw_xchg_acquire arch_xchg             << 
 28 #else                                          << 
 29 extern void raw_xchg_acquire_not_implemented(v << 
 30 #define raw_xchg_acquire(...) raw_xchg_acquire << 
 31 #endif                                             20 #endif
 32                                                    21 
 33 #if defined(arch_xchg_release)                 !!  22 #ifndef arch_xchg_release
 34 #define raw_xchg_release arch_xchg_release     !!  23 #define arch_xchg_release(...) \
 35 #elif defined(arch_xchg_relaxed)               << 
 36 #define raw_xchg_release(...) \                << 
 37         __atomic_op_release(arch_xchg, __VA_AR     24         __atomic_op_release(arch_xchg, __VA_ARGS__)
 38 #elif defined(arch_xchg)                       << 
 39 #define raw_xchg_release arch_xchg             << 
 40 #else                                          << 
 41 extern void raw_xchg_release_not_implemented(v << 
 42 #define raw_xchg_release(...) raw_xchg_release << 
 43 #endif                                         << 
 44                                                << 
 45 #if defined(arch_xchg_relaxed)                 << 
 46 #define raw_xchg_relaxed arch_xchg_relaxed     << 
 47 #elif defined(arch_xchg)                       << 
 48 #define raw_xchg_relaxed arch_xchg             << 
 49 #else                                          << 
 50 extern void raw_xchg_relaxed_not_implemented(v << 
 51 #define raw_xchg_relaxed(...) raw_xchg_relaxed << 
 52 #endif                                         << 
 53                                                << 
 54 #if defined(arch_cmpxchg)                      << 
 55 #define raw_cmpxchg arch_cmpxchg               << 
 56 #elif defined(arch_cmpxchg_relaxed)            << 
 57 #define raw_cmpxchg(...) \                     << 
 58         __atomic_op_fence(arch_cmpxchg, __VA_A << 
 59 #else                                          << 
 60 extern void raw_cmpxchg_not_implemented(void); << 
 61 #define raw_cmpxchg(...) raw_cmpxchg_not_imple << 
 62 #endif                                             25 #endif
 63                                                    26 
 64 #if defined(arch_cmpxchg_acquire)              !!  27 #ifndef arch_xchg
 65 #define raw_cmpxchg_acquire arch_cmpxchg_acqui !!  28 #define arch_xchg(...) \
 66 #elif defined(arch_cmpxchg_relaxed)            !!  29         __atomic_op_fence(arch_xchg, __VA_ARGS__)
 67 #define raw_cmpxchg_acquire(...) \             !!  30 #endif
                                                   >>  31 
                                                   >>  32 #endif /* arch_xchg_relaxed */
                                                   >>  33 
                                                   >>  34 #ifndef arch_cmpxchg_relaxed
                                                   >>  35 #define arch_cmpxchg_acquire arch_cmpxchg
                                                   >>  36 #define arch_cmpxchg_release arch_cmpxchg
                                                   >>  37 #define arch_cmpxchg_relaxed arch_cmpxchg
                                                   >>  38 #else /* arch_cmpxchg_relaxed */
                                                   >>  39 
                                                   >>  40 #ifndef arch_cmpxchg_acquire
                                                   >>  41 #define arch_cmpxchg_acquire(...) \
 68         __atomic_op_acquire(arch_cmpxchg, __VA     42         __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
 69 #elif defined(arch_cmpxchg)                    << 
 70 #define raw_cmpxchg_acquire arch_cmpxchg       << 
 71 #else                                          << 
 72 extern void raw_cmpxchg_acquire_not_implemente << 
 73 #define raw_cmpxchg_acquire(...) raw_cmpxchg_a << 
 74 #endif                                             43 #endif
 75                                                    44 
 76 #if defined(arch_cmpxchg_release)              !!  45 #ifndef arch_cmpxchg_release
 77 #define raw_cmpxchg_release arch_cmpxchg_relea !!  46 #define arch_cmpxchg_release(...) \
 78 #elif defined(arch_cmpxchg_relaxed)            << 
 79 #define raw_cmpxchg_release(...) \             << 
 80         __atomic_op_release(arch_cmpxchg, __VA     47         __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
 81 #elif defined(arch_cmpxchg)                    << 
 82 #define raw_cmpxchg_release arch_cmpxchg       << 
 83 #else                                          << 
 84 extern void raw_cmpxchg_release_not_implemente << 
 85 #define raw_cmpxchg_release(...) raw_cmpxchg_r << 
 86 #endif                                         << 
 87                                                << 
 88 #if defined(arch_cmpxchg_relaxed)              << 
 89 #define raw_cmpxchg_relaxed arch_cmpxchg_relax << 
 90 #elif defined(arch_cmpxchg)                    << 
 91 #define raw_cmpxchg_relaxed arch_cmpxchg       << 
 92 #else                                          << 
 93 extern void raw_cmpxchg_relaxed_not_implemente << 
 94 #define raw_cmpxchg_relaxed(...) raw_cmpxchg_r << 
 95 #endif                                         << 
 96                                                << 
 97 #if defined(arch_cmpxchg64)                    << 
 98 #define raw_cmpxchg64 arch_cmpxchg64           << 
 99 #elif defined(arch_cmpxchg64_relaxed)          << 
100 #define raw_cmpxchg64(...) \                   << 
101         __atomic_op_fence(arch_cmpxchg64, __VA << 
102 #else                                          << 
103 extern void raw_cmpxchg64_not_implemented(void << 
104 #define raw_cmpxchg64(...) raw_cmpxchg64_not_i << 
105 #endif                                             48 #endif
106                                                    49 
107 #if defined(arch_cmpxchg64_acquire)            !!  50 #ifndef arch_cmpxchg
108 #define raw_cmpxchg64_acquire arch_cmpxchg64_a !!  51 #define arch_cmpxchg(...) \
109 #elif defined(arch_cmpxchg64_relaxed)          !!  52         __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
110 #define raw_cmpxchg64_acquire(...) \           !!  53 #endif
                                                   >>  54 
                                                   >>  55 #endif /* arch_cmpxchg_relaxed */
                                                   >>  56 
                                                   >>  57 #ifndef arch_cmpxchg64_relaxed
                                                   >>  58 #define arch_cmpxchg64_acquire arch_cmpxchg64
                                                   >>  59 #define arch_cmpxchg64_release arch_cmpxchg64
                                                   >>  60 #define arch_cmpxchg64_relaxed arch_cmpxchg64
                                                   >>  61 #else /* arch_cmpxchg64_relaxed */
                                                   >>  62 
                                                   >>  63 #ifndef arch_cmpxchg64_acquire
                                                   >>  64 #define arch_cmpxchg64_acquire(...) \
111         __atomic_op_acquire(arch_cmpxchg64, __     65         __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
112 #elif defined(arch_cmpxchg64)                  << 
113 #define raw_cmpxchg64_acquire arch_cmpxchg64   << 
114 #else                                          << 
115 extern void raw_cmpxchg64_acquire_not_implemen << 
116 #define raw_cmpxchg64_acquire(...) raw_cmpxchg << 
117 #endif                                             66 #endif
118                                                    67 
119 #if defined(arch_cmpxchg64_release)            !!  68 #ifndef arch_cmpxchg64_release
120 #define raw_cmpxchg64_release arch_cmpxchg64_r !!  69 #define arch_cmpxchg64_release(...) \
121 #elif defined(arch_cmpxchg64_relaxed)          << 
122 #define raw_cmpxchg64_release(...) \           << 
123         __atomic_op_release(arch_cmpxchg64, __     70         __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
124 #elif defined(arch_cmpxchg64)                  << 
125 #define raw_cmpxchg64_release arch_cmpxchg64   << 
126 #else                                          << 
127 extern void raw_cmpxchg64_release_not_implemen << 
128 #define raw_cmpxchg64_release(...) raw_cmpxchg << 
129 #endif                                         << 
130                                                << 
131 #if defined(arch_cmpxchg64_relaxed)            << 
132 #define raw_cmpxchg64_relaxed arch_cmpxchg64_r << 
133 #elif defined(arch_cmpxchg64)                  << 
134 #define raw_cmpxchg64_relaxed arch_cmpxchg64   << 
135 #else                                          << 
136 extern void raw_cmpxchg64_relaxed_not_implemen << 
137 #define raw_cmpxchg64_relaxed(...) raw_cmpxchg << 
138 #endif                                         << 
139                                                << 
140 #if defined(arch_cmpxchg128)                   << 
141 #define raw_cmpxchg128 arch_cmpxchg128         << 
142 #elif defined(arch_cmpxchg128_relaxed)         << 
143 #define raw_cmpxchg128(...) \                  << 
144         __atomic_op_fence(arch_cmpxchg128, __V << 
145 #else                                          << 
146 extern void raw_cmpxchg128_not_implemented(voi << 
147 #define raw_cmpxchg128(...) raw_cmpxchg128_not << 
148 #endif                                         << 
149                                                << 
150 #if defined(arch_cmpxchg128_acquire)           << 
151 #define raw_cmpxchg128_acquire arch_cmpxchg128 << 
152 #elif defined(arch_cmpxchg128_relaxed)         << 
153 #define raw_cmpxchg128_acquire(...) \          << 
154         __atomic_op_acquire(arch_cmpxchg128, _ << 
155 #elif defined(arch_cmpxchg128)                 << 
156 #define raw_cmpxchg128_acquire arch_cmpxchg128 << 
157 #else                                          << 
158 extern void raw_cmpxchg128_acquire_not_impleme << 
159 #define raw_cmpxchg128_acquire(...) raw_cmpxch << 
160 #endif                                         << 
161                                                << 
162 #if defined(arch_cmpxchg128_release)           << 
163 #define raw_cmpxchg128_release arch_cmpxchg128 << 
164 #elif defined(arch_cmpxchg128_relaxed)         << 
165 #define raw_cmpxchg128_release(...) \          << 
166         __atomic_op_release(arch_cmpxchg128, _ << 
167 #elif defined(arch_cmpxchg128)                 << 
168 #define raw_cmpxchg128_release arch_cmpxchg128 << 
169 #else                                          << 
170 extern void raw_cmpxchg128_release_not_impleme << 
171 #define raw_cmpxchg128_release(...) raw_cmpxch << 
172 #endif                                         << 
173                                                << 
174 #if defined(arch_cmpxchg128_relaxed)           << 
175 #define raw_cmpxchg128_relaxed arch_cmpxchg128 << 
176 #elif defined(arch_cmpxchg128)                 << 
177 #define raw_cmpxchg128_relaxed arch_cmpxchg128 << 
178 #else                                          << 
179 extern void raw_cmpxchg128_relaxed_not_impleme << 
180 #define raw_cmpxchg128_relaxed(...) raw_cmpxch << 
181 #endif                                         << 
182                                                << 
183 #if defined(arch_try_cmpxchg)                  << 
184 #define raw_try_cmpxchg arch_try_cmpxchg       << 
185 #elif defined(arch_try_cmpxchg_relaxed)        << 
186 #define raw_try_cmpxchg(...) \                 << 
187         __atomic_op_fence(arch_try_cmpxchg, __ << 
188 #else                                          << 
189 #define raw_try_cmpxchg(_ptr, _oldp, _new) \   << 
190 ({ \                                           << 
191         typeof(*(_ptr)) *___op = (_oldp), ___o << 
192         ___r = raw_cmpxchg((_ptr), ___o, (_new << 
193         if (unlikely(___r != ___o)) \          << 
194                 *___op = ___r; \               << 
195         likely(___r == ___o); \                << 
196 })                                             << 
197 #endif                                             71 #endif
198                                                    72 
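The try_cmpxchg fallbacks above show the other recurring trick in this file: when only a value-returning cmpxchg exists, a boolean try_cmpxchg is emulated by comparing the returned old value and, on failure, writing it back through the caller's expected-value pointer. A hedged userspace sketch of the same logic (again using the __atomic builtins as a stand-in for arch_cmpxchg):

/* Illustration only: try_cmpxchg emulated on top of a value-returning CAS. */
#include <stdbool.h>
#include <stdio.h>

/* Value-returning CAS, playing the role of arch_cmpxchg(). */
static inline int cmpxchg_val(int *p, int old, int newval)
{
        __atomic_compare_exchange_n(p, &old, newval, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return old;     /* the value that was actually in *p */
}

/* Boolean try_cmpxchg built on top, like the fallback macro. */
static inline bool try_cmpxchg(int *p, int *oldp, int newval)
{
        int old = *oldp;
        int ret = cmpxchg_val(p, old, newval);

        if (ret != old)
                *oldp = ret;    /* report what we actually saw */
        return ret == old;
}

int main(void)
{
        int v = 5, expected = 5;

        printf("%d\n", try_cmpxchg(&v, &expected, 6)); /* 1, v is now 6 */
        printf("%d\n", try_cmpxchg(&v, &expected, 7)); /* 0, expected now 6 */
        return 0;
}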
199 #if defined(arch_try_cmpxchg_acquire)          !!  73 #ifndef arch_cmpxchg64
200 #define raw_try_cmpxchg_acquire arch_try_cmpxc !!  74 #define arch_cmpxchg64(...) \
201 #elif defined(arch_try_cmpxchg_relaxed)        !!  75         __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
202 #define raw_try_cmpxchg_acquire(...) \         << 
203         __atomic_op_acquire(arch_try_cmpxchg,  << 
204 #elif defined(arch_try_cmpxchg)                << 
205 #define raw_try_cmpxchg_acquire arch_try_cmpxc << 
206 #else                                          << 
207 #define raw_try_cmpxchg_acquire(_ptr, _oldp, _ << 
208 ({ \                                           << 
209         typeof(*(_ptr)) *___op = (_oldp), ___o << 
210         ___r = raw_cmpxchg_acquire((_ptr), ___ << 
211         if (unlikely(___r != ___o)) \          << 
212                 *___op = ___r; \               << 
213         likely(___r == ___o); \                << 
214 })                                             << 
215 #endif                                             76 #endif
216                                                    77 
217 #if defined(arch_try_cmpxchg_release)          !!  78 #endif /* arch_cmpxchg64_relaxed */
218 #define raw_try_cmpxchg_release arch_try_cmpxc !!  79 
219 #elif defined(arch_try_cmpxchg_relaxed)        !!  80 #ifndef arch_try_cmpxchg_relaxed
220 #define raw_try_cmpxchg_release(...) \         !!  81 #ifdef arch_try_cmpxchg
221         __atomic_op_release(arch_try_cmpxchg,  !!  82 #define arch_try_cmpxchg_acquire arch_try_cmpxchg
222 #elif defined(arch_try_cmpxchg)                !!  83 #define arch_try_cmpxchg_release arch_try_cmpxchg
223 #define raw_try_cmpxchg_release arch_try_cmpxc !!  84 #define arch_try_cmpxchg_relaxed arch_try_cmpxchg
224 #else                                          !!  85 #endif /* arch_try_cmpxchg */
225 #define raw_try_cmpxchg_release(_ptr, _oldp, _ !!  86 
                                                   >>  87 #ifndef arch_try_cmpxchg
                                                   >>  88 #define arch_try_cmpxchg(_ptr, _oldp, _new) \
226 ({ \                                               89 ({ \
227         typeof(*(_ptr)) *___op = (_oldp), ___o     90         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
228         ___r = raw_cmpxchg_release((_ptr), ___ !!  91         ___r = arch_cmpxchg((_ptr), ___o, (_new)); \
229         if (unlikely(___r != ___o)) \              92         if (unlikely(___r != ___o)) \
230                 *___op = ___r; \                   93                 *___op = ___r; \
231         likely(___r == ___o); \                    94         likely(___r == ___o); \
232 })                                                 95 })
233 #endif                                         !!  96 #endif /* arch_try_cmpxchg */
234                                                    97 
235 #if defined(arch_try_cmpxchg_relaxed)          !!  98 #ifndef arch_try_cmpxchg_acquire
236 #define raw_try_cmpxchg_relaxed arch_try_cmpxc !!  99 #define arch_try_cmpxchg_acquire(_ptr, _oldp, _new) \
237 #elif defined(arch_try_cmpxchg)                << 
238 #define raw_try_cmpxchg_relaxed arch_try_cmpxc << 
239 #else                                          << 
240 #define raw_try_cmpxchg_relaxed(_ptr, _oldp, _ << 
241 ({ \                                              100 ({ \
242         typeof(*(_ptr)) *___op = (_oldp), ___o    101         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
243         ___r = raw_cmpxchg_relaxed((_ptr), ___ !! 102         ___r = arch_cmpxchg_acquire((_ptr), ___o, (_new)); \
244         if (unlikely(___r != ___o)) \             103         if (unlikely(___r != ___o)) \
245                 *___op = ___r; \                  104                 *___op = ___r; \
246         likely(___r == ___o); \                   105         likely(___r == ___o); \
247 })                                                106 })
248 #endif                                         !! 107 #endif /* arch_try_cmpxchg_acquire */
249                                                   108 
250 #if defined(arch_try_cmpxchg64)                !! 109 #ifndef arch_try_cmpxchg_release
251 #define raw_try_cmpxchg64 arch_try_cmpxchg64   !! 110 #define arch_try_cmpxchg_release(_ptr, _oldp, _new) \
252 #elif defined(arch_try_cmpxchg64_relaxed)      << 
253 #define raw_try_cmpxchg64(...) \               << 
254         __atomic_op_fence(arch_try_cmpxchg64,  << 
255 #else                                          << 
256 #define raw_try_cmpxchg64(_ptr, _oldp, _new) \ << 
257 ({ \                                              111 ({ \
258         typeof(*(_ptr)) *___op = (_oldp), ___o    112         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
259         ___r = raw_cmpxchg64((_ptr), ___o, (_n !! 113         ___r = arch_cmpxchg_release((_ptr), ___o, (_new)); \
260         if (unlikely(___r != ___o)) \             114         if (unlikely(___r != ___o)) \
261                 *___op = ___r; \                  115                 *___op = ___r; \
262         likely(___r == ___o); \                   116         likely(___r == ___o); \
263 })                                                117 })
264 #endif                                         !! 118 #endif /* arch_try_cmpxchg_release */
265                                                   119 
266 #if defined(arch_try_cmpxchg64_acquire)        !! 120 #ifndef arch_try_cmpxchg_relaxed
267 #define raw_try_cmpxchg64_acquire arch_try_cmp !! 121 #define arch_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
268 #elif defined(arch_try_cmpxchg64_relaxed)      << 
269 #define raw_try_cmpxchg64_acquire(...) \       << 
270         __atomic_op_acquire(arch_try_cmpxchg64 << 
271 #elif defined(arch_try_cmpxchg64)              << 
272 #define raw_try_cmpxchg64_acquire arch_try_cmp << 
273 #else                                          << 
274 #define raw_try_cmpxchg64_acquire(_ptr, _oldp, << 
275 ({ \                                              122 ({ \
276         typeof(*(_ptr)) *___op = (_oldp), ___o    123         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
277         ___r = raw_cmpxchg64_acquire((_ptr), _ !! 124         ___r = arch_cmpxchg_relaxed((_ptr), ___o, (_new)); \
278         if (unlikely(___r != ___o)) \             125         if (unlikely(___r != ___o)) \
279                 *___op = ___r; \                  126                 *___op = ___r; \
280         likely(___r == ___o); \                   127         likely(___r == ___o); \
281 })                                                128 })
                                                   >> 129 #endif /* arch_try_cmpxchg_relaxed */
                                                   >> 130 
                                                   >> 131 #else /* arch_try_cmpxchg_relaxed */
                                                   >> 132 
                                                   >> 133 #ifndef arch_try_cmpxchg_acquire
                                                   >> 134 #define arch_try_cmpxchg_acquire(...) \
                                                   >> 135         __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
282 #endif                                            136 #endif
283                                                   137 
284 #if defined(arch_try_cmpxchg64_release)        !! 138 #ifndef arch_try_cmpxchg_release
285 #define raw_try_cmpxchg64_release arch_try_cmp !! 139 #define arch_try_cmpxchg_release(...) \
286 #elif defined(arch_try_cmpxchg64_relaxed)      !! 140         __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
287 #define raw_try_cmpxchg64_release(...) \       << 
288         __atomic_op_release(arch_try_cmpxchg64 << 
289 #elif defined(arch_try_cmpxchg64)              << 
290 #define raw_try_cmpxchg64_release arch_try_cmp << 
291 #else                                          << 
292 #define raw_try_cmpxchg64_release(_ptr, _oldp, << 
293 ({ \                                           << 
294         typeof(*(_ptr)) *___op = (_oldp), ___o << 
295         ___r = raw_cmpxchg64_release((_ptr), _ << 
296         if (unlikely(___r != ___o)) \          << 
297                 *___op = ___r; \               << 
298         likely(___r == ___o); \                << 
299 })                                             << 
300 #endif                                            141 #endif
301                                                   142 
302 #if defined(arch_try_cmpxchg64_relaxed)        !! 143 #ifndef arch_try_cmpxchg
303 #define raw_try_cmpxchg64_relaxed arch_try_cmp !! 144 #define arch_try_cmpxchg(...) \
304 #elif defined(arch_try_cmpxchg64)              !! 145         __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
305 #define raw_try_cmpxchg64_relaxed arch_try_cmp << 
306 #else                                          << 
307 #define raw_try_cmpxchg64_relaxed(_ptr, _oldp, << 
308 ({ \                                           << 
309         typeof(*(_ptr)) *___op = (_oldp), ___o << 
310         ___r = raw_cmpxchg64_relaxed((_ptr), _ << 
311         if (unlikely(___r != ___o)) \          << 
312                 *___op = ___r; \               << 
313         likely(___r == ___o); \                << 
314 })                                             << 
315 #endif                                            146 #endif
316                                                   147 
317 #if defined(arch_try_cmpxchg128)               !! 148 #endif /* arch_try_cmpxchg_relaxed */
318 #define raw_try_cmpxchg128 arch_try_cmpxchg128 !! 149 
319 #elif defined(arch_try_cmpxchg128_relaxed)     !! 150 #ifndef arch_try_cmpxchg64_relaxed
320 #define raw_try_cmpxchg128(...) \              !! 151 #ifdef arch_try_cmpxchg64
321         __atomic_op_fence(arch_try_cmpxchg128, !! 152 #define arch_try_cmpxchg64_acquire arch_try_cmpxchg64
322 #else                                          !! 153 #define arch_try_cmpxchg64_release arch_try_cmpxchg64
323 #define raw_try_cmpxchg128(_ptr, _oldp, _new)  !! 154 #define arch_try_cmpxchg64_relaxed arch_try_cmpxchg64
                                                   >> 155 #endif /* arch_try_cmpxchg64 */
                                                   >> 156 
                                                   >> 157 #ifndef arch_try_cmpxchg64
                                                   >> 158 #define arch_try_cmpxchg64(_ptr, _oldp, _new) \
324 ({ \                                              159 ({ \
325         typeof(*(_ptr)) *___op = (_oldp), ___o    160         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
326         ___r = raw_cmpxchg128((_ptr), ___o, (_ !! 161         ___r = arch_cmpxchg64((_ptr), ___o, (_new)); \
327         if (unlikely(___r != ___o)) \             162         if (unlikely(___r != ___o)) \
328                 *___op = ___r; \                  163                 *___op = ___r; \
329         likely(___r == ___o); \                   164         likely(___r == ___o); \
330 })                                                165 })
331 #endif                                         !! 166 #endif /* arch_try_cmpxchg64 */
332                                                   167 
333 #if defined(arch_try_cmpxchg128_acquire)       !! 168 #ifndef arch_try_cmpxchg64_acquire
334 #define raw_try_cmpxchg128_acquire arch_try_cm !! 169 #define arch_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
335 #elif defined(arch_try_cmpxchg128_relaxed)     << 
336 #define raw_try_cmpxchg128_acquire(...) \      << 
337         __atomic_op_acquire(arch_try_cmpxchg12 << 
338 #elif defined(arch_try_cmpxchg128)             << 
339 #define raw_try_cmpxchg128_acquire arch_try_cm << 
340 #else                                          << 
341 #define raw_try_cmpxchg128_acquire(_ptr, _oldp << 
342 ({ \                                              170 ({ \
343         typeof(*(_ptr)) *___op = (_oldp), ___o    171         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
344         ___r = raw_cmpxchg128_acquire((_ptr),  !! 172         ___r = arch_cmpxchg64_acquire((_ptr), ___o, (_new)); \
345         if (unlikely(___r != ___o)) \             173         if (unlikely(___r != ___o)) \
346                 *___op = ___r; \                  174                 *___op = ___r; \
347         likely(___r == ___o); \                   175         likely(___r == ___o); \
348 })                                                176 })
349 #endif                                         !! 177 #endif /* arch_try_cmpxchg64_acquire */
350                                                   178 
351 #if defined(arch_try_cmpxchg128_release)       !! 179 #ifndef arch_try_cmpxchg64_release
352 #define raw_try_cmpxchg128_release arch_try_cm !! 180 #define arch_try_cmpxchg64_release(_ptr, _oldp, _new) \
353 #elif defined(arch_try_cmpxchg128_relaxed)     << 
354 #define raw_try_cmpxchg128_release(...) \      << 
355         __atomic_op_release(arch_try_cmpxchg12 << 
356 #elif defined(arch_try_cmpxchg128)             << 
357 #define raw_try_cmpxchg128_release arch_try_cm << 
358 #else                                          << 
359 #define raw_try_cmpxchg128_release(_ptr, _oldp << 
360 ({ \                                              181 ({ \
361         typeof(*(_ptr)) *___op = (_oldp), ___o    182         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
362         ___r = raw_cmpxchg128_release((_ptr),  !! 183         ___r = arch_cmpxchg64_release((_ptr), ___o, (_new)); \
363         if (unlikely(___r != ___o)) \             184         if (unlikely(___r != ___o)) \
364                 *___op = ___r; \                  185                 *___op = ___r; \
365         likely(___r == ___o); \                   186         likely(___r == ___o); \
366 })                                                187 })
367 #endif                                         !! 188 #endif /* arch_try_cmpxchg64_release */
368                                                   189 
369 #if defined(arch_try_cmpxchg128_relaxed)       !! 190 #ifndef arch_try_cmpxchg64_relaxed
370 #define raw_try_cmpxchg128_relaxed arch_try_cm !! 191 #define arch_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
371 #elif defined(arch_try_cmpxchg128)             << 
372 #define raw_try_cmpxchg128_relaxed arch_try_cm << 
373 #else                                          << 
374 #define raw_try_cmpxchg128_relaxed(_ptr, _oldp << 
375 ({ \                                              192 ({ \
376         typeof(*(_ptr)) *___op = (_oldp), ___o    193         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
377         ___r = raw_cmpxchg128_relaxed((_ptr),  !! 194         ___r = arch_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
378         if (unlikely(___r != ___o)) \             195         if (unlikely(___r != ___o)) \
379                 *___op = ___r; \                  196                 *___op = ___r; \
380         likely(___r == ___o); \                   197         likely(___r == ___o); \
381 })                                                198 })
382 #endif                                         !! 199 #endif /* arch_try_cmpxchg64_relaxed */
383                                                   200 
384 #define raw_cmpxchg_local arch_cmpxchg_local   !! 201 #else /* arch_try_cmpxchg64_relaxed */
385                                                   202 
386 #ifdef arch_try_cmpxchg_local                  !! 203 #ifndef arch_try_cmpxchg64_acquire
387 #define raw_try_cmpxchg_local arch_try_cmpxchg !! 204 #define arch_try_cmpxchg64_acquire(...) \
388 #else                                          !! 205         __atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
389 #define raw_try_cmpxchg_local(_ptr, _oldp, _ne << 
390 ({ \                                           << 
391         typeof(*(_ptr)) *___op = (_oldp), ___o << 
392         ___r = raw_cmpxchg_local((_ptr), ___o, << 
393         if (unlikely(___r != ___o)) \          << 
394                 *___op = ___r; \               << 
395         likely(___r == ___o); \                << 
396 })                                             << 
397 #endif                                            206 #endif
398                                                   207 
399 #define raw_cmpxchg64_local arch_cmpxchg64_loc !! 208 #ifndef arch_try_cmpxchg64_release
                                                   >> 209 #define arch_try_cmpxchg64_release(...) \
                                                   >> 210         __atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
                                                   >> 211 #endif
400                                                   212 
401 #ifdef arch_try_cmpxchg64_local                !! 213 #ifndef arch_try_cmpxchg64
402 #define raw_try_cmpxchg64_local arch_try_cmpxc !! 214 #define arch_try_cmpxchg64(...) \
403 #else                                          !! 215         __atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
404 #define raw_try_cmpxchg64_local(_ptr, _oldp, _ << 
405 ({ \                                           << 
406         typeof(*(_ptr)) *___op = (_oldp), ___o << 
407         ___r = raw_cmpxchg64_local((_ptr), ___ << 
408         if (unlikely(___r != ___o)) \          << 
409                 *___op = ___r; \               << 
410         likely(___r == ___o); \                << 
411 })                                             << 
412 #endif                                            216 #endif
413                                                   217 
414 #define raw_cmpxchg128_local arch_cmpxchg128_l !! 218 #endif /* arch_try_cmpxchg64_relaxed */
415                                                   219 
416 #ifdef arch_try_cmpxchg128_local               !! 220 #ifndef arch_try_cmpxchg_local
417 #define raw_try_cmpxchg128_local arch_try_cmpx !! 221 #define arch_try_cmpxchg_local(_ptr, _oldp, _new) \
418 #else                                          << 
419 #define raw_try_cmpxchg128_local(_ptr, _oldp,  << 
420 ({ \                                              222 ({ \
421         typeof(*(_ptr)) *___op = (_oldp), ___o    223         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
422         ___r = raw_cmpxchg128_local((_ptr), __ !! 224         ___r = arch_cmpxchg_local((_ptr), ___o, (_new)); \
423         if (unlikely(___r != ___o)) \             225         if (unlikely(___r != ___o)) \
424                 *___op = ___r; \                  226                 *___op = ___r; \
425         likely(___r == ___o); \                   227         likely(___r == ___o); \
426 })                                                228 })
427 #endif                                         !! 229 #endif /* arch_try_cmpxchg_local */
428                                                << 
429 #define raw_sync_cmpxchg arch_sync_cmpxchg     << 
430                                                   230 
431 #ifdef arch_sync_try_cmpxchg                   !! 231 #ifndef arch_try_cmpxchg64_local
432 #define raw_sync_try_cmpxchg arch_sync_try_cmp !! 232 #define arch_try_cmpxchg64_local(_ptr, _oldp, _new) \
433 #else                                          << 
434 #define raw_sync_try_cmpxchg(_ptr, _oldp, _new << 
435 ({ \                                              233 ({ \
436         typeof(*(_ptr)) *___op = (_oldp), ___o    234         typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
437         ___r = raw_sync_cmpxchg((_ptr), ___o,  !! 235         ___r = arch_cmpxchg64_local((_ptr), ___o, (_new)); \
438         if (unlikely(___r != ___o)) \             236         if (unlikely(___r != ___o)) \
439                 *___op = ___r; \                  237                 *___op = ___r; \
440         likely(___r == ___o); \                   238         likely(___r == ___o); \
441 })                                                239 })
442 #endif                                         !! 240 #endif /* arch_try_cmpxchg64_local */
443                                                << 
444 /**                                            << 
445  * raw_atomic_read() - atomic load with relaxe << 
446  * @v: pointer to atomic_t                     << 
447  *                                             << 
448  * Atomically loads the value of @v with relax << 
449  *                                             << 
450  * Safe to use in noinstr code; prefer atomic_ << 
451  *                                             << 
452  * Return: The value loaded from @v.           << 
453  */                                            << 
454 static __always_inline int                     << 
455 raw_atomic_read(const atomic_t *v)             << 
456 {                                              << 
457         return arch_atomic_read(v);            << 
458 }                                              << 
459                                                   241 
460 /**                                            !! 242 #ifndef arch_atomic_read_acquire
461  * raw_atomic_read_acquire() - atomic load wit << 
462  * @v: pointer to atomic_t                     << 
463  *                                             << 
464  * Atomically loads the value of @v with acqui << 
465  *                                             << 
466  * Safe to use in noinstr code; prefer atomic_ << 
467  *                                             << 
468  * Return: The value loaded from @v.           << 
469  */                                            << 
470 static __always_inline int                        243 static __always_inline int
471 raw_atomic_read_acquire(const atomic_t *v)     !! 244 arch_atomic_read_acquire(const atomic_t *v)
472 {                                                 245 {
473 #if defined(arch_atomic_read_acquire)          << 
474         return arch_atomic_read_acquire(v);    << 
475 #else                                          << 
476         int ret;                                  246         int ret;
477                                                   247 
478         if (__native_word(atomic_t)) {            248         if (__native_word(atomic_t)) {
479                 ret = smp_load_acquire(&(v)->c    249                 ret = smp_load_acquire(&(v)->counter);
480         } else {                                  250         } else {
481                 ret = raw_atomic_read(v);      !! 251                 ret = arch_atomic_read(v);
482                 __atomic_acquire_fence();         252                 __atomic_acquire_fence();
483         }                                         253         }
484                                                   254 
485         return ret;                               255         return ret;
486 #endif                                         << 
487 }                                                 256 }
                                                   >> 257 #define arch_atomic_read_acquire arch_atomic_read_acquire
                                                   >> 258 #endif
488                                                   259 
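The acquire-load fallback above prefers smp_load_acquire() when atomic_t is a native machine word and otherwise falls back to a relaxed read followed by an acquire fence. Outside the kernel the same two shapes can be written with the __atomic builtins; a small sketch, assuming a plain int counter rather than atomic_t:

/* Illustration only: the two shapes of an acquire load. */
#include <stdio.h>

/* Preferred shape: a single load with acquire semantics. */
static inline int read_acquire_native(const int *p)
{
        return __atomic_load_n(p, __ATOMIC_ACQUIRE);
}

/* Fallback shape: relaxed load, then a separate acquire fence. */
static inline int read_acquire_fenced(const int *p)
{
        int ret = __atomic_load_n(p, __ATOMIC_RELAXED);

        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        return ret;
}

int main(void)
{
        int counter = 42;

        printf("%d %d\n", read_acquire_native(&counter),
               read_acquire_fenced(&counter));
        return 0;
}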
489 /**                                            !! 260 #ifndef arch_atomic_set_release
490  * raw_atomic_set() - atomic set with relaxed  << 
491  * @v: pointer to atomic_t                     << 
492  * @i: int value to assign                     << 
493  *                                             << 
494  * Atomically sets @v to @i with relaxed order << 
495  *                                             << 
496  * Safe to use in noinstr code; prefer atomic_ << 
497  *                                             << 
498  * Return: Nothing.                            << 
499  */                                            << 
500 static __always_inline void                       261 static __always_inline void
501 raw_atomic_set(atomic_t *v, int i)             !! 262 arch_atomic_set_release(atomic_t *v, int i)
502 {                                                 263 {
503         arch_atomic_set(v, i);                 << 
504 }                                              << 
505                                                << 
506 /**                                            << 
507  * raw_atomic_set_release() - atomic set with  << 
508  * @v: pointer to atomic_t                     << 
509  * @i: int value to assign                     << 
510  *                                             << 
511  * Atomically sets @v to @i with release order << 
512  *                                             << 
513  * Safe to use in noinstr code; prefer atomic_ << 
514  *                                             << 
515  * Return: Nothing.                            << 
516  */                                            << 
517 static __always_inline void                    << 
518 raw_atomic_set_release(atomic_t *v, int i)     << 
519 {                                              << 
520 #if defined(arch_atomic_set_release)           << 
521         arch_atomic_set_release(v, i);         << 
522 #else                                          << 
523         if (__native_word(atomic_t)) {            264         if (__native_word(atomic_t)) {
524                 smp_store_release(&(v)->counte    265                 smp_store_release(&(v)->counter, i);
525         } else {                                  266         } else {
526                 __atomic_release_fence();         267                 __atomic_release_fence();
527                 raw_atomic_set(v, i);          !! 268                 arch_atomic_set(v, i);
528         }                                         269         }
                                                   >> 270 }
                                                   >> 271 #define arch_atomic_set_release arch_atomic_set_release
529 #endif                                            272 #endif
                                                   >> 273 
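The set_release fallback is the mirror image: smp_store_release() for native-word types, otherwise a release fence followed by a relaxed store. The equivalent userspace sketch, under the same assumptions as above:

/* Illustration only: the two shapes of a release store. */
#include <stdio.h>

/* Preferred shape: a single store with release semantics. */
static inline void set_release_native(int *p, int i)
{
        __atomic_store_n(p, i, __ATOMIC_RELEASE);
}

/* Fallback shape: release fence first, then a relaxed store. */
static inline void set_release_fenced(int *p, int i)
{
        __atomic_thread_fence(__ATOMIC_RELEASE);
        __atomic_store_n(p, i, __ATOMIC_RELAXED);
}

int main(void)
{
        int counter = 0;

        set_release_native(&counter, 1);
        set_release_fenced(&counter, 2);
        printf("%d\n", counter);        /* 2 */
        return 0;
}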
                                                   >> 274 #ifndef arch_atomic_add_return_relaxed
                                                   >> 275 #define arch_atomic_add_return_acquire arch_atomic_add_return
                                                   >> 276 #define arch_atomic_add_return_release arch_atomic_add_return
                                                   >> 277 #define arch_atomic_add_return_relaxed arch_atomic_add_return
                                                   >> 278 #else /* arch_atomic_add_return_relaxed */
                                                   >> 279 
                                                   >> 280 #ifndef arch_atomic_add_return_acquire
                                                   >> 281 static __always_inline int
                                                   >> 282 arch_atomic_add_return_acquire(int i, atomic_t *v)
                                                   >> 283 {
                                                   >> 284         int ret = arch_atomic_add_return_relaxed(i, v);
                                                   >> 285         __atomic_acquire_fence();
                                                   >> 286         return ret;
530 }                                                 287 }
                                                   >> 288 #define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
                                                   >> 289 #endif
531                                                   290 
532 /**                                            !! 291 #ifndef arch_atomic_add_return_release
533  * raw_atomic_add() - atomic add with relaxed  !! 292 static __always_inline int
534  * @i: int value to add                        !! 293 arch_atomic_add_return_release(int i, atomic_t *v)
535  * @v: pointer to atomic_t                     << 
536  *                                             << 
537  * Atomically updates @v to (@v + @i) with rel << 
538  *                                             << 
539  * Safe to use in noinstr code; prefer atomic_ << 
540  *                                             << 
541  * Return: Nothing.                            << 
542  */                                            << 
543 static __always_inline void                    << 
544 raw_atomic_add(int i, atomic_t *v)             << 
545 {                                                 294 {
546         arch_atomic_add(i, v);                 !! 295         __atomic_release_fence();
                                                   >> 296         return arch_atomic_add_return_relaxed(i, v);
547 }                                                 297 }
                                                   >> 298 #define arch_atomic_add_return_release arch_atomic_add_return_release
                                                   >> 299 #endif
548                                                   300 
549 /**                                            !! 301 #ifndef arch_atomic_add_return
550  * raw_atomic_add_return() - atomic add with f << 
551  * @i: int value to add                        << 
552  * @v: pointer to atomic_t                     << 
553  *                                             << 
554  * Atomically updates @v to (@v + @i) with ful << 
555  *                                             << 
556  * Safe to use in noinstr code; prefer atomic_ << 
557  *                                             << 
558  * Return: The updated value of @v.            << 
559  */                                            << 
560 static __always_inline int                        302 static __always_inline int
561 raw_atomic_add_return(int i, atomic_t *v)      !! 303 arch_atomic_add_return(int i, atomic_t *v)
562 {                                                 304 {
563 #if defined(arch_atomic_add_return)            << 
564         return arch_atomic_add_return(i, v);   << 
565 #elif defined(arch_atomic_add_return_relaxed)  << 
566         int ret;                                  305         int ret;
567         __atomic_pre_full_fence();                306         __atomic_pre_full_fence();
568         ret = arch_atomic_add_return_relaxed(i    307         ret = arch_atomic_add_return_relaxed(i, v);
569         __atomic_post_full_fence();               308         __atomic_post_full_fence();
570         return ret;                               309         return ret;
571 #else                                          << 
572 #error "Unable to define raw_atomic_add_return << 
573 #endif                                         << 
574 }                                                 310 }
                                                   >> 311 #define arch_atomic_add_return arch_atomic_add_return
                                                   >> 312 #endif
575                                                   313 
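For the fully ordered add_return fallback, the relaxed RMW is bracketed by __atomic_pre_full_fence() and __atomic_post_full_fence(). A rough userspace analogue uses seq_cst fences on both sides of a relaxed fetch-add; the seq_cst fences here are only a stand-in for the kernel's pre/post full fences, which an architecture may define more cheaply:

/* Illustration only: fully ordered RMW built from a relaxed primitive. */
#include <stdio.h>

/* Relaxed "arch" primitive: returns the new value, like add_return. */
static inline int add_return_relaxed(int *p, int i)
{
        return __atomic_add_fetch(p, i, __ATOMIC_RELAXED);
}

/* Fully ordered variant: fence, relaxed op, fence. */
static inline int add_return(int *p, int i)
{
        int ret;

        __atomic_thread_fence(__ATOMIC_SEQ_CST);
        ret = add_return_relaxed(p, i);
        __atomic_thread_fence(__ATOMIC_SEQ_CST);
        return ret;
}

int main(void)
{
        int counter = 1;

        printf("%d\n", add_return(&counter, 2));        /* 3 */
        return 0;
}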
576 /**                                            !! 314 #endif /* arch_atomic_add_return_relaxed */
577  * raw_atomic_add_return_acquire() - atomic ad !! 315 
578  * @i: int value to add                        !! 316 #ifndef arch_atomic_fetch_add_relaxed
579  * @v: pointer to atomic_t                     !! 317 #define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
580  *                                             !! 318 #define arch_atomic_fetch_add_release arch_atomic_fetch_add
581  * Atomically updates @v to (@v + @i) with acq !! 319 #define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add
582  *                                             !! 320 #else /* arch_atomic_fetch_add_relaxed */
583  * Safe to use in noinstr code; prefer atomic_ !! 321 
584  *                                             !! 322 #ifndef arch_atomic_fetch_add_acquire
585  * Return: The updated value of @v.            << 
586  */                                            << 
587 static __always_inline int                        323 static __always_inline int
588 raw_atomic_add_return_acquire(int i, atomic_t  !! 324 arch_atomic_fetch_add_acquire(int i, atomic_t *v)
589 {                                                 325 {
590 #if defined(arch_atomic_add_return_acquire)    !! 326         int ret = arch_atomic_fetch_add_relaxed(i, v);
591         return arch_atomic_add_return_acquire( << 
592 #elif defined(arch_atomic_add_return_relaxed)  << 
593         int ret = arch_atomic_add_return_relax << 
594         __atomic_acquire_fence();                 327         __atomic_acquire_fence();
595         return ret;                               328         return ret;
596 #elif defined(arch_atomic_add_return)          << 
597         return arch_atomic_add_return(i, v);   << 
598 #else                                          << 
599 #error "Unable to define raw_atomic_add_return << 
600 #endif                                         << 
601 }                                                 329 }
                                                   >> 330 #define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
                                                   >> 331 #endif
602                                                   332 
603 /**                                            !! 333 #ifndef arch_atomic_fetch_add_release
604  * raw_atomic_add_return_release() - atomic ad << 
605  * @i: int value to add                        << 
606  * @v: pointer to atomic_t                     << 
607  *                                             << 
608  * Atomically updates @v to (@v + @i) with rel << 
609  *                                             << 
610  * Safe to use in noinstr code; prefer atomic_ << 
611  *                                             << 
612  * Return: The updated value of @v.            << 
613  */                                            << 
614 static __always_inline int                        334 static __always_inline int
615 raw_atomic_add_return_release(int i, atomic_t  !! 335 arch_atomic_fetch_add_release(int i, atomic_t *v)
616 {                                                 336 {
617 #if defined(arch_atomic_add_return_release)    << 
618         return arch_atomic_add_return_release( << 
619 #elif defined(arch_atomic_add_return_relaxed)  << 
620         __atomic_release_fence();                 337         __atomic_release_fence();
621         return arch_atomic_add_return_relaxed( !! 338         return arch_atomic_fetch_add_relaxed(i, v);
622 #elif defined(arch_atomic_add_return)          << 
623         return arch_atomic_add_return(i, v);   << 
624 #else                                          << 
625 #error "Unable to define raw_atomic_add_return << 
626 #endif                                         << 
627 }                                                 339 }
628                                                !! 340 #define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
629 /**                                            << 
630  * raw_atomic_add_return_relaxed() - atomic ad << 
631  * @i: int value to add                        << 
632  * @v: pointer to atomic_t                     << 
633  *                                             << 
634  * Atomically updates @v to (@v + @i) with rel << 
635  *                                             << 
636  * Safe to use in noinstr code; prefer atomic_ << 
637  *                                             << 
638  * Return: The updated value of @v.            << 
639  */                                            << 
640 static __always_inline int                     << 
641 raw_atomic_add_return_relaxed(int i, atomic_t  << 
642 {                                              << 
643 #if defined(arch_atomic_add_return_relaxed)    << 
644         return arch_atomic_add_return_relaxed( << 
645 #elif defined(arch_atomic_add_return)          << 
646         return arch_atomic_add_return(i, v);   << 
647 #else                                          << 
648 #error "Unable to define raw_atomic_add_return << 
649 #endif                                            341 #endif
650 }                                              << 
651                                                   342 
652 /**                                            !! 343 #ifndef arch_atomic_fetch_add
653  * raw_atomic_fetch_add() - atomic add with fu << 
654  * @i: int value to add                        << 
655  * @v: pointer to atomic_t                     << 
656  *                                             << 
657  * Atomically updates @v to (@v + @i) with ful << 
658  *                                             << 
659  * Safe to use in noinstr code; prefer atomic_ << 
660  *                                             << 
661  * Return: The original value of @v.           << 
662  */                                            << 
663 static __always_inline int                        344 static __always_inline int
664 raw_atomic_fetch_add(int i, atomic_t *v)       !! 345 arch_atomic_fetch_add(int i, atomic_t *v)
665 {                                                 346 {
666 #if defined(arch_atomic_fetch_add)             << 
667         return arch_atomic_fetch_add(i, v);    << 
668 #elif defined(arch_atomic_fetch_add_relaxed)   << 
669         int ret;                                  347         int ret;
670         __atomic_pre_full_fence();                348         __atomic_pre_full_fence();
671         ret = arch_atomic_fetch_add_relaxed(i,    349         ret = arch_atomic_fetch_add_relaxed(i, v);
672         __atomic_post_full_fence();               350         __atomic_post_full_fence();
673         return ret;                               351         return ret;
674 #else                                          << 
675 #error "Unable to define raw_atomic_fetch_add" << 
676 #endif                                         << 
677 }                                                 352 }
                                                   >> 353 #define arch_atomic_fetch_add arch_atomic_fetch_add
                                                   >> 354 #endif
678                                                   355 
679 /**                                            !! 356 #endif /* arch_atomic_fetch_add_relaxed */
680  * raw_atomic_fetch_add_acquire() - atomic add !! 357 
681  * @i: int value to add                        !! 358 #ifndef arch_atomic_sub_return_relaxed
682  * @v: pointer to atomic_t                     !! 359 #define arch_atomic_sub_return_acquire arch_atomic_sub_return
683  *                                             !! 360 #define arch_atomic_sub_return_release arch_atomic_sub_return
684  * Atomically updates @v to (@v + @i) with acq !! 361 #define arch_atomic_sub_return_relaxed arch_atomic_sub_return
685  *                                             !! 362 #else /* arch_atomic_sub_return_relaxed */
686  * Safe to use in noinstr code; prefer atomic_ !! 363 
687  *                                             !! 364 #ifndef arch_atomic_sub_return_acquire
688  * Return: The original value of @v.           << 
689  */                                            << 
690 static __always_inline int                        365 static __always_inline int
691 raw_atomic_fetch_add_acquire(int i, atomic_t * !! 366 arch_atomic_sub_return_acquire(int i, atomic_t *v)
692 {                                                 367 {
693 #if defined(arch_atomic_fetch_add_acquire)     !! 368         int ret = arch_atomic_sub_return_relaxed(i, v);
694         return arch_atomic_fetch_add_acquire(i << 
695 #elif defined(arch_atomic_fetch_add_relaxed)   << 
696         int ret = arch_atomic_fetch_add_relaxe << 
697         __atomic_acquire_fence();                 369         __atomic_acquire_fence();
698         return ret;                               370         return ret;
699 #elif defined(arch_atomic_fetch_add)           << 
700         return arch_atomic_fetch_add(i, v);    << 
701 #else                                          << 
702 #error "Unable to define raw_atomic_fetch_add_ << 
703 #endif                                         << 
704 }                                                 371 }
                                                   >> 372 #define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
                                                   >> 373 #endif
705                                                   374 
706 /**                                            !! 375 #ifndef arch_atomic_sub_return_release
707  * raw_atomic_fetch_add_release() - atomic add << 
708  * @i: int value to add                        << 
709  * @v: pointer to atomic_t                     << 
710  *                                             << 
711  * Atomically updates @v to (@v + @i) with rel << 
712  *                                             << 
713  * Safe to use in noinstr code; prefer atomic_ << 
714  *                                             << 
715  * Return: The original value of @v.           << 
716  */                                            << 
717 static __always_inline int                        376 static __always_inline int
718 raw_atomic_fetch_add_release(int i, atomic_t * !! 377 arch_atomic_sub_return_release(int i, atomic_t *v)
719 {                                                 378 {
720 #if defined(arch_atomic_fetch_add_release)     << 
721         return arch_atomic_fetch_add_release(i << 
722 #elif defined(arch_atomic_fetch_add_relaxed)   << 
723         __atomic_release_fence();                 379         __atomic_release_fence();
724         return arch_atomic_fetch_add_relaxed(i !! 380         return arch_atomic_sub_return_relaxed(i, v);
725 #elif defined(arch_atomic_fetch_add)           << 
726         return arch_atomic_fetch_add(i, v);    << 
727 #else                                          << 
728 #error "Unable to define raw_atomic_fetch_add_release" <<
729 #endif                                         << 
730 }                                                 381 }
731                                                !! 382 #define arch_atomic_sub_return_release arch_atomic_sub_return_release
732 /**                                            << 
733  * raw_atomic_fetch_add_relaxed() - atomic add << 
734  * @i: int value to add                        << 
735  * @v: pointer to atomic_t                     << 
736  *                                             << 
737  * Atomically updates @v to (@v + @i) with rel << 
738  *                                             << 
739  * Safe to use in noinstr code; prefer atomic_ << 
740  *                                             << 
741  * Return: The original value of @v.           << 
742  */                                            << 
743 static __always_inline int                     << 
744 raw_atomic_fetch_add_relaxed(int i, atomic_t * << 
745 {                                              << 
746 #if defined(arch_atomic_fetch_add_relaxed)     << 
747         return arch_atomic_fetch_add_relaxed(i << 
748 #elif defined(arch_atomic_fetch_add)           << 
749         return arch_atomic_fetch_add(i, v);    << 
750 #else                                          << 
751 #error "Unable to define raw_atomic_fetch_add_relaxed" <<
752 #endif                                            383 #endif
753 }                                              << 
754                                                   384 
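
The raw_atomic_fetch_add_*() definitions above all follow one cascade: use the architecture's op with the requested ordering when it exists, otherwise synthesize it from the _relaxed op plus a fence, otherwise fall back to the fully ordered op. The code below is a minimal user-space C11 sketch of that composition, offered as an analogy only: the kernel's __atomic_acquire_fence(), __atomic_release_fence() and __atomic_pre/post_full_fence() helpers expand to architecture-specific barriers, not necessarily the seq_cst fences used here, and the sketch_* names are invented for illustration.

#include <stdatomic.h>

/* acquire ordering: relaxed RMW first, then an acquire fence */
int sketch_fetch_add_acquire(atomic_int *v, int i)
{
	int ret = atomic_fetch_add_explicit(v, i, memory_order_relaxed);
	atomic_thread_fence(memory_order_acquire);
	return ret;
}

/* release ordering: release fence first, then the relaxed RMW */
int sketch_fetch_add_release(atomic_int *v, int i)
{
	atomic_thread_fence(memory_order_release);
	return atomic_fetch_add_explicit(v, i, memory_order_relaxed);
}

/* full ordering: bracket the relaxed RMW with fences on both sides */
int sketch_fetch_add(atomic_int *v, int i)
{
	int ret;

	atomic_thread_fence(memory_order_seq_cst);
	ret = atomic_fetch_add_explicit(v, i, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	return ret;
}

The same relaxed-plus-fence construction reappears verbatim in every *_acquire, *_release and fully ordered fallback throughout the rest of this file.
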
755 /**                                            !! 385 #ifndef arch_atomic_sub_return
756  * raw_atomic_sub() - atomic subtract with rel << 
757  * @i: int value to subtract                   << 
758  * @v: pointer to atomic_t                     << 
759  *                                             << 
760  * Atomically updates @v to (@v - @i) with rel << 
761  *                                             << 
762  * Safe to use in noinstr code; prefer atomic_ << 
763  *                                             << 
764  * Return: Nothing.                            << 
765  */                                            << 
766 static __always_inline void                    << 
767 raw_atomic_sub(int i, atomic_t *v)             << 
768 {                                              << 
769         arch_atomic_sub(i, v);                 << 
770 }                                              << 
771                                                << 
772 /**                                            << 
773  * raw_atomic_sub_return() - atomic subtract w << 
774  * @i: int value to subtract                   << 
775  * @v: pointer to atomic_t                     << 
776  *                                             << 
777  * Atomically updates @v to (@v - @i) with ful << 
778  *                                             << 
779  * Safe to use in noinstr code; prefer atomic_ << 
780  *                                             << 
781  * Return: The updated value of @v.            << 
782  */                                            << 
783 static __always_inline int                        386 static __always_inline int
784 raw_atomic_sub_return(int i, atomic_t *v)      !! 387 arch_atomic_sub_return(int i, atomic_t *v)
785 {                                                 388 {
786 #if defined(arch_atomic_sub_return)            << 
787         return arch_atomic_sub_return(i, v);   << 
788 #elif defined(arch_atomic_sub_return_relaxed)  << 
789         int ret;                                  389         int ret;
790         __atomic_pre_full_fence();                390         __atomic_pre_full_fence();
791         ret = arch_atomic_sub_return_relaxed(i    391         ret = arch_atomic_sub_return_relaxed(i, v);
792         __atomic_post_full_fence();               392         __atomic_post_full_fence();
793         return ret;                               393         return ret;
794 #else                                          << 
795 #error "Unable to define raw_atomic_sub_return" <<
796 #endif                                         << 
797 }                                                 394 }
                                                   >> 395 #define arch_atomic_sub_return arch_atomic_sub_return
                                                   >> 396 #endif
798                                                   397 
799 /**                                            !! 398 #endif /* arch_atomic_sub_return_relaxed */
800  * raw_atomic_sub_return_acquire() - atomic su !! 399 
801  * @i: int value to subtract                   !! 400 #ifndef arch_atomic_fetch_sub_relaxed
802  * @v: pointer to atomic_t                     !! 401 #define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
803  *                                             !! 402 #define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
804  * Atomically updates @v to (@v - @i) with acq !! 403 #define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub
805  *                                             !! 404 #else /* arch_atomic_fetch_sub_relaxed */
806  * Safe to use in noinstr code; prefer atomic_ !! 405 
807  *                                             !! 406 #ifndef arch_atomic_fetch_sub_acquire
808  * Return: The updated value of @v.            << 
809  */                                            << 
810 static __always_inline int                        407 static __always_inline int
811 raw_atomic_sub_return_acquire(int i, atomic_t  !! 408 arch_atomic_fetch_sub_acquire(int i, atomic_t *v)
812 {                                                 409 {
813 #if defined(arch_atomic_sub_return_acquire)    !! 410         int ret = arch_atomic_fetch_sub_relaxed(i, v);
814         return arch_atomic_sub_return_acquire( << 
815 #elif defined(arch_atomic_sub_return_relaxed)  << 
816         int ret = arch_atomic_sub_return_relax << 
817         __atomic_acquire_fence();                 411         __atomic_acquire_fence();
818         return ret;                               412         return ret;
819 #elif defined(arch_atomic_sub_return)          << 
820         return arch_atomic_sub_return(i, v);   << 
821 #else                                          << 
822 #error "Unable to define raw_atomic_sub_return_acquire" <<
823 #endif                                         << 
824 }                                                 413 }
                                                   >> 414 #define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
                                                   >> 415 #endif
825                                                   416 
826 /**                                            !! 417 #ifndef arch_atomic_fetch_sub_release
827  * raw_atomic_sub_return_release() - atomic su << 
828  * @i: int value to subtract                   << 
829  * @v: pointer to atomic_t                     << 
830  *                                             << 
831  * Atomically updates @v to (@v - @i) with rel << 
832  *                                             << 
833  * Safe to use in noinstr code; prefer atomic_ << 
834  *                                             << 
835  * Return: The updated value of @v.            << 
836  */                                            << 
837 static __always_inline int                        418 static __always_inline int
838 raw_atomic_sub_return_release(int i, atomic_t  !! 419 arch_atomic_fetch_sub_release(int i, atomic_t *v)
839 {                                                 420 {
840 #if defined(arch_atomic_sub_return_release)    << 
841         return arch_atomic_sub_return_release( << 
842 #elif defined(arch_atomic_sub_return_relaxed)  << 
843         __atomic_release_fence();                 421         __atomic_release_fence();
844         return arch_atomic_sub_return_relaxed( !! 422         return arch_atomic_fetch_sub_relaxed(i, v);
845 #elif defined(arch_atomic_sub_return)          << 
846         return arch_atomic_sub_return(i, v);   << 
847 #else                                          << 
848 #error "Unable to define raw_atomic_sub_return_release" <<
849 #endif                                         << 
850 }                                                 423 }
851                                                !! 424 #define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
852 /**                                            << 
853  * raw_atomic_sub_return_relaxed() - atomic su << 
854  * @i: int value to subtract                   << 
855  * @v: pointer to atomic_t                     << 
856  *                                             << 
857  * Atomically updates @v to (@v - @i) with rel << 
858  *                                             << 
859  * Safe to use in noinstr code; prefer atomic_ << 
860  *                                             << 
861  * Return: The updated value of @v.            << 
862  */                                            << 
863 static __always_inline int                     << 
864 raw_atomic_sub_return_relaxed(int i, atomic_t  << 
865 {                                              << 
866 #if defined(arch_atomic_sub_return_relaxed)    << 
867         return arch_atomic_sub_return_relaxed( << 
868 #elif defined(arch_atomic_sub_return)          << 
869         return arch_atomic_sub_return(i, v);   << 
870 #else                                          << 
871 #error "Unable to define raw_atomic_sub_return_relaxed" <<
872 #endif                                            425 #endif
873 }                                              << 
874                                                   426 
875 /**                                            !! 427 #ifndef arch_atomic_fetch_sub
876  * raw_atomic_fetch_sub() - atomic subtract wi << 
877  * @i: int value to subtract                   << 
878  * @v: pointer to atomic_t                     << 
879  *                                             << 
880  * Atomically updates @v to (@v - @i) with ful << 
881  *                                             << 
882  * Safe to use in noinstr code; prefer atomic_ << 
883  *                                             << 
884  * Return: The original value of @v.           << 
885  */                                            << 
886 static __always_inline int                        428 static __always_inline int
887 raw_atomic_fetch_sub(int i, atomic_t *v)       !! 429 arch_atomic_fetch_sub(int i, atomic_t *v)
888 {                                                 430 {
889 #if defined(arch_atomic_fetch_sub)             << 
890         return arch_atomic_fetch_sub(i, v);    << 
891 #elif defined(arch_atomic_fetch_sub_relaxed)   << 
892         int ret;                                  431         int ret;
893         __atomic_pre_full_fence();                432         __atomic_pre_full_fence();
894         ret = arch_atomic_fetch_sub_relaxed(i,    433         ret = arch_atomic_fetch_sub_relaxed(i, v);
895         __atomic_post_full_fence();               434         __atomic_post_full_fence();
896         return ret;                               435         return ret;
897 #else                                          !! 436 }
898 #error "Unable to define raw_atomic_fetch_sub" !! 437 #define arch_atomic_fetch_sub arch_atomic_fetch_sub
899 #endif                                            438 #endif
                                                   >> 439 
                                                   >> 440 #endif /* arch_atomic_fetch_sub_relaxed */
                                                   >> 441 
                                                   >> 442 #ifndef arch_atomic_inc
                                                   >> 443 static __always_inline void
                                                   >> 444 arch_atomic_inc(atomic_t *v)
                                                   >> 445 {
                                                   >> 446         arch_atomic_add(1, v);
900 }                                                 447 }
                                                   >> 448 #define arch_atomic_inc arch_atomic_inc
                                                   >> 449 #endif
901                                                   450 
902 /**                                            !! 451 #ifndef arch_atomic_inc_return_relaxed
903  * raw_atomic_fetch_sub_acquire() - atomic sub !! 452 #ifdef arch_atomic_inc_return
904  * @i: int value to subtract                   !! 453 #define arch_atomic_inc_return_acquire arch_atomic_inc_return
905  * @v: pointer to atomic_t                     !! 454 #define arch_atomic_inc_return_release arch_atomic_inc_return
906  *                                             !! 455 #define arch_atomic_inc_return_relaxed arch_atomic_inc_return
907  * Atomically updates @v to (@v - @i) with acq !! 456 #endif /* arch_atomic_inc_return */
908  *                                             !! 457 
909  * Safe to use in noinstr code; prefer atomic_ !! 458 #ifndef arch_atomic_inc_return
910  *                                             << 
911  * Return: The original value of @v.           << 
912  */                                            << 
913 static __always_inline int                        459 static __always_inline int
914 raw_atomic_fetch_sub_acquire(int i, atomic_t * !! 460 arch_atomic_inc_return(atomic_t *v)
915 {                                                 461 {
916 #if defined(arch_atomic_fetch_sub_acquire)     !! 462         return arch_atomic_add_return(1, v);
917         return arch_atomic_fetch_sub_acquire(i << 
918 #elif defined(arch_atomic_fetch_sub_relaxed)   << 
919         int ret = arch_atomic_fetch_sub_relaxe << 
920         __atomic_acquire_fence();              << 
921         return ret;                            << 
922 #elif defined(arch_atomic_fetch_sub)           << 
923         return arch_atomic_fetch_sub(i, v);    << 
924 #else                                          << 
925 #error "Unable to define raw_atomic_fetch_sub_acquire" <<
926 #endif                                         << 
927 }                                                 463 }
                                                   >> 464 #define arch_atomic_inc_return arch_atomic_inc_return
                                                   >> 465 #endif
928                                                   466 
929 /**                                            !! 467 #ifndef arch_atomic_inc_return_acquire
930  * raw_atomic_fetch_sub_release() - atomic sub << 
931  * @i: int value to subtract                   << 
932  * @v: pointer to atomic_t                     << 
933  *                                             << 
934  * Atomically updates @v to (@v - @i) with rel << 
935  *                                             << 
936  * Safe to use in noinstr code; prefer atomic_ << 
937  *                                             << 
938  * Return: The original value of @v.           << 
939  */                                            << 
940 static __always_inline int                        468 static __always_inline int
941 raw_atomic_fetch_sub_release(int i, atomic_t * !! 469 arch_atomic_inc_return_acquire(atomic_t *v)
942 {                                                 470 {
943 #if defined(arch_atomic_fetch_sub_release)     !! 471         return arch_atomic_add_return_acquire(1, v);
944         return arch_atomic_fetch_sub_release(i << 
945 #elif defined(arch_atomic_fetch_sub_relaxed)   << 
946         __atomic_release_fence();              << 
947         return arch_atomic_fetch_sub_relaxed(i << 
948 #elif defined(arch_atomic_fetch_sub)           << 
949         return arch_atomic_fetch_sub(i, v);    << 
950 #else                                          << 
951 #error "Unable to define raw_atomic_fetch_sub_release" <<
952 #endif                                         << 
953 }                                                 472 }
                                                   >> 473 #define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
                                                   >> 474 #endif
954                                                   475 
955 /**                                            !! 476 #ifndef arch_atomic_inc_return_release
956  * raw_atomic_fetch_sub_relaxed() - atomic sub << 
957  * @i: int value to subtract                   << 
958  * @v: pointer to atomic_t                     << 
959  *                                             << 
960  * Atomically updates @v to (@v - @i) with rel << 
961  *                                             << 
962  * Safe to use in noinstr code; prefer atomic_ << 
963  *                                             << 
964  * Return: The original value of @v.           << 
965  */                                            << 
966 static __always_inline int                        477 static __always_inline int
967 raw_atomic_fetch_sub_relaxed(int i, atomic_t * !! 478 arch_atomic_inc_return_release(atomic_t *v)
968 {                                                 479 {
969 #if defined(arch_atomic_fetch_sub_relaxed)     !! 480         return arch_atomic_add_return_release(1, v);
970         return arch_atomic_fetch_sub_relaxed(i !! 481 }
971 #elif defined(arch_atomic_fetch_sub)           !! 482 #define arch_atomic_inc_return_release arch_atomic_inc_return_release
972         return arch_atomic_fetch_sub(i, v);    << 
973 #else                                          << 
974 #error "Unable to define raw_atomic_fetch_sub_relaxed" <<
975 #endif                                            483 #endif
                                                   >> 484 
                                                   >> 485 #ifndef arch_atomic_inc_return_relaxed
                                                   >> 486 static __always_inline int
                                                   >> 487 arch_atomic_inc_return_relaxed(atomic_t *v)
                                                   >> 488 {
                                                   >> 489         return arch_atomic_add_return_relaxed(1, v);
976 }                                                 490 }
                                                   >> 491 #define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
                                                   >> 492 #endif
977                                                   493 
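
Unlike the inc/dec wrappers further down, raw_atomic_fetch_sub() and its ordering variants have no alternative derivation: if the architecture defines neither arch_atomic_fetch_sub nor arch_atomic_fetch_sub_relaxed, the #else branches above hit #error. A common way for an architecture to supply the missing relaxed primitive is a compare-and-swap loop; the sketch below shows that idea with user-space C11 atomics. It is illustrative only, the function name is invented, and no particular architecture's implementation is being quoted.

#include <stdatomic.h>

/* hypothetical relaxed fetch_sub built from a CAS retry loop */
int sketch_fetch_sub_relaxed(atomic_int *v, int i)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);

	/* retry until the swap succeeds without interference */
	while (!atomic_compare_exchange_weak_explicit(v, &old, old - i,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;
	return old;	/* fetch_* variants return the pre-update value */
}
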
978 /**                                            !! 494 #else /* arch_atomic_inc_return_relaxed */
979  * raw_atomic_inc() - atomic increment with re !! 495 
980  * @v: pointer to atomic_t                     !! 496 #ifndef arch_atomic_inc_return_acquire
981  *                                             !! 497 static __always_inline int
982  * Atomically updates @v to (@v + 1) with rela !! 498 arch_atomic_inc_return_acquire(atomic_t *v)
983  *                                             << 
984  * Safe to use in noinstr code; prefer atomic_ << 
985  *                                             << 
986  * Return: Nothing.                            << 
987  */                                            << 
988 static __always_inline void                    << 
989 raw_atomic_inc(atomic_t *v)                    << 
990 {                                                 499 {
991 #if defined(arch_atomic_inc)                   !! 500         int ret = arch_atomic_inc_return_relaxed(v);
992         arch_atomic_inc(v);                    !! 501         __atomic_acquire_fence();
993 #else                                          !! 502         return ret;
994         raw_atomic_add(1, v);                  !! 503 }
                                                   >> 504 #define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
995 #endif                                            505 #endif
                                                   >> 506 
                                                   >> 507 #ifndef arch_atomic_inc_return_release
                                                   >> 508 static __always_inline int
                                                   >> 509 arch_atomic_inc_return_release(atomic_t *v)
                                                   >> 510 {
                                                   >> 511         __atomic_release_fence();
                                                   >> 512         return arch_atomic_inc_return_relaxed(v);
996 }                                                 513 }
                                                   >> 514 #define arch_atomic_inc_return_release arch_atomic_inc_return_release
                                                   >> 515 #endif
997                                                   516 
998 /**                                            !! 517 #ifndef arch_atomic_inc_return
999  * raw_atomic_inc_return() - atomic increment  << 
1000  * @v: pointer to atomic_t                    << 
1001  *                                            << 
1002  * Atomically updates @v to (@v + 1) with ful << 
1003  *                                            << 
1004  * Safe to use in noinstr code; prefer atomic << 
1005  *                                            << 
1006  * Return: The updated value of @v.           << 
1007  */                                           << 
1008 static __always_inline int                       518 static __always_inline int
1009 raw_atomic_inc_return(atomic_t *v)            !! 519 arch_atomic_inc_return(atomic_t *v)
1010 {                                                520 {
1011 #if defined(arch_atomic_inc_return)           << 
1012         return arch_atomic_inc_return(v);     << 
1013 #elif defined(arch_atomic_inc_return_relaxed) << 
1014         int ret;                                 521         int ret;
1015         __atomic_pre_full_fence();               522         __atomic_pre_full_fence();
1016         ret = arch_atomic_inc_return_relaxed(    523         ret = arch_atomic_inc_return_relaxed(v);
1017         __atomic_post_full_fence();              524         __atomic_post_full_fence();
1018         return ret;                              525         return ret;
1019 #else                                         << 
1020         return raw_atomic_add_return(1, v);   << 
1021 #endif                                        << 
1022 }                                                526 }
                                                   >> 527 #define arch_atomic_inc_return arch_atomic_inc_return
                                                   >> 528 #endif
1023                                                  529 
1024 /**                                           !! 530 #endif /* arch_atomic_inc_return_relaxed */
1025  * raw_atomic_inc_return_acquire() - atomic i !! 531 
1026  * @v: pointer to atomic_t                    !! 532 #ifndef arch_atomic_fetch_inc_relaxed
1027  *                                            !! 533 #ifdef arch_atomic_fetch_inc
1028  * Atomically updates @v to (@v + 1) with acq !! 534 #define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc
1029  *                                            !! 535 #define arch_atomic_fetch_inc_release arch_atomic_fetch_inc
1030  * Safe to use in noinstr code; prefer atomic !! 536 #define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc
1031  *                                            !! 537 #endif /* arch_atomic_fetch_inc */
1032  * Return: The updated value of @v.           !! 538 
1033  */                                           !! 539 #ifndef arch_atomic_fetch_inc
1034 static __always_inline int                       540 static __always_inline int
1035 raw_atomic_inc_return_acquire(atomic_t *v)    !! 541 arch_atomic_fetch_inc(atomic_t *v)
1036 {                                                542 {
1037 #if defined(arch_atomic_inc_return_acquire)   !! 543         return arch_atomic_fetch_add(1, v);
1038         return arch_atomic_inc_return_acquire << 
1039 #elif defined(arch_atomic_inc_return_relaxed) << 
1040         int ret = arch_atomic_inc_return_rela << 
1041         __atomic_acquire_fence();             << 
1042         return ret;                           << 
1043 #elif defined(arch_atomic_inc_return)         << 
1044         return arch_atomic_inc_return(v);     << 
1045 #else                                         << 
1046         return raw_atomic_add_return_acquire( << 
1047 #endif                                        << 
1048 }                                                544 }
                                                   >> 545 #define arch_atomic_fetch_inc arch_atomic_fetch_inc
                                                   >> 546 #endif
1049                                                  547 
1050 /**                                           !! 548 #ifndef arch_atomic_fetch_inc_acquire
1051  * raw_atomic_inc_return_release() - atomic i << 
1052  * @v: pointer to atomic_t                    << 
1053  *                                            << 
1054  * Atomically updates @v to (@v + 1) with rel << 
1055  *                                            << 
1056  * Safe to use in noinstr code; prefer atomic << 
1057  *                                            << 
1058  * Return: The updated value of @v.           << 
1059  */                                           << 
1060 static __always_inline int                       549 static __always_inline int
1061 raw_atomic_inc_return_release(atomic_t *v)    !! 550 arch_atomic_fetch_inc_acquire(atomic_t *v)
1062 {                                                551 {
1063 #if defined(arch_atomic_inc_return_release)   !! 552         return arch_atomic_fetch_add_acquire(1, v);
1064         return arch_atomic_inc_return_release << 
1065 #elif defined(arch_atomic_inc_return_relaxed) << 
1066         __atomic_release_fence();             << 
1067         return arch_atomic_inc_return_relaxed << 
1068 #elif defined(arch_atomic_inc_return)         << 
1069         return arch_atomic_inc_return(v);     << 
1070 #else                                         << 
1071         return raw_atomic_add_return_release( << 
1072 #endif                                        << 
1073 }                                                553 }
                                                   >> 554 #define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
                                                   >> 555 #endif
1074                                                  556 
1075 /**                                           !! 557 #ifndef arch_atomic_fetch_inc_release
1076  * raw_atomic_inc_return_relaxed() - atomic i << 
1077  * @v: pointer to atomic_t                    << 
1078  *                                            << 
1079  * Atomically updates @v to (@v + 1) with rel << 
1080  *                                            << 
1081  * Safe to use in noinstr code; prefer atomic << 
1082  *                                            << 
1083  * Return: The updated value of @v.           << 
1084  */                                           << 
1085 static __always_inline int                       558 static __always_inline int
1086 raw_atomic_inc_return_relaxed(atomic_t *v)    !! 559 arch_atomic_fetch_inc_release(atomic_t *v)
1087 {                                                560 {
1088 #if defined(arch_atomic_inc_return_relaxed)   !! 561         return arch_atomic_fetch_add_release(1, v);
1089         return arch_atomic_inc_return_relaxed << 
1090 #elif defined(arch_atomic_inc_return)         << 
1091         return arch_atomic_inc_return(v);     << 
1092 #else                                         << 
1093         return raw_atomic_add_return_relaxed( << 
1094 #endif                                        << 
1095 }                                                562 }
                                                   >> 563 #define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
                                                   >> 564 #endif
1096                                                  565 
1097 /**                                           !! 566 #ifndef arch_atomic_fetch_inc_relaxed
1098  * raw_atomic_fetch_inc() - atomic increment  << 
1099  * @v: pointer to atomic_t                    << 
1100  *                                            << 
1101  * Atomically updates @v to (@v + 1) with ful << 
1102  *                                            << 
1103  * Safe to use in noinstr code; prefer atomic << 
1104  *                                            << 
1105  * Return: The original value of @v.          << 
1106  */                                           << 
1107 static __always_inline int                       567 static __always_inline int
1108 raw_atomic_fetch_inc(atomic_t *v)             !! 568 arch_atomic_fetch_inc_relaxed(atomic_t *v)
1109 {                                                569 {
1110 #if defined(arch_atomic_fetch_inc)            !! 570         return arch_atomic_fetch_add_relaxed(1, v);
1111         return arch_atomic_fetch_inc(v);      << 
1112 #elif defined(arch_atomic_fetch_inc_relaxed)  << 
1113         int ret;                              << 
1114         __atomic_pre_full_fence();            << 
1115         ret = arch_atomic_fetch_inc_relaxed(v << 
1116         __atomic_post_full_fence();           << 
1117         return ret;                           << 
1118 #else                                         << 
1119         return raw_atomic_fetch_add(1, v);    << 
1120 #endif                                        << 
1121 }                                                571 }
                                                   >> 572 #define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed
                                                   >> 573 #endif
1122                                                  574 
1123 /**                                           !! 575 #else /* arch_atomic_fetch_inc_relaxed */
1124  * raw_atomic_fetch_inc_acquire() - atomic in !! 576 
1125  * @v: pointer to atomic_t                    !! 577 #ifndef arch_atomic_fetch_inc_acquire
1126  *                                            << 
1127  * Atomically updates @v to (@v + 1) with acq << 
1128  *                                            << 
1129  * Safe to use in noinstr code; prefer atomic << 
1130  *                                            << 
1131  * Return: The original value of @v.          << 
1132  */                                           << 
1133 static __always_inline int                       578 static __always_inline int
1134 raw_atomic_fetch_inc_acquire(atomic_t *v)     !! 579 arch_atomic_fetch_inc_acquire(atomic_t *v)
1135 {                                                580 {
1136 #if defined(arch_atomic_fetch_inc_acquire)    << 
1137         return arch_atomic_fetch_inc_acquire( << 
1138 #elif defined(arch_atomic_fetch_inc_relaxed)  << 
1139         int ret = arch_atomic_fetch_inc_relax    581         int ret = arch_atomic_fetch_inc_relaxed(v);
1140         __atomic_acquire_fence();                582         __atomic_acquire_fence();
1141         return ret;                              583         return ret;
1142 #elif defined(arch_atomic_fetch_inc)          << 
1143         return arch_atomic_fetch_inc(v);      << 
1144 #else                                         << 
1145         return raw_atomic_fetch_add_acquire(1 << 
1146 #endif                                        << 
1147 }                                                584 }
                                                   >> 585 #define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
                                                   >> 586 #endif
1148                                                  587 
1149 /**                                           !! 588 #ifndef arch_atomic_fetch_inc_release
1150  * raw_atomic_fetch_inc_release() - atomic in << 
1151  * @v: pointer to atomic_t                    << 
1152  *                                            << 
1153  * Atomically updates @v to (@v + 1) with rel << 
1154  *                                            << 
1155  * Safe to use in noinstr code; prefer atomic << 
1156  *                                            << 
1157  * Return: The original value of @v.          << 
1158  */                                           << 
1159 static __always_inline int                       589 static __always_inline int
1160 raw_atomic_fetch_inc_release(atomic_t *v)     !! 590 arch_atomic_fetch_inc_release(atomic_t *v)
1161 {                                                591 {
1162 #if defined(arch_atomic_fetch_inc_release)    << 
1163         return arch_atomic_fetch_inc_release( << 
1164 #elif defined(arch_atomic_fetch_inc_relaxed)  << 
1165         __atomic_release_fence();                592         __atomic_release_fence();
1166         return arch_atomic_fetch_inc_relaxed(    593         return arch_atomic_fetch_inc_relaxed(v);
1167 #elif defined(arch_atomic_fetch_inc)          << 
1168         return arch_atomic_fetch_inc(v);      << 
1169 #else                                         << 
1170         return raw_atomic_fetch_add_release(1 << 
1171 #endif                                        << 
1172 }                                                594 }
                                                   >> 595 #define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
                                                   >> 596 #endif
1173                                                  597 
1174 /**                                           !! 598 #ifndef arch_atomic_fetch_inc
1175  * raw_atomic_fetch_inc_relaxed() - atomic in << 
1176  * @v: pointer to atomic_t                    << 
1177  *                                            << 
1178  * Atomically updates @v to (@v + 1) with rel << 
1179  *                                            << 
1180  * Safe to use in noinstr code; prefer atomic << 
1181  *                                            << 
1182  * Return: The original value of @v.          << 
1183  */                                           << 
1184 static __always_inline int                       599 static __always_inline int
1185 raw_atomic_fetch_inc_relaxed(atomic_t *v)     !! 600 arch_atomic_fetch_inc(atomic_t *v)
1186 {                                                601 {
1187 #if defined(arch_atomic_fetch_inc_relaxed)    !! 602         int ret;
1188         return arch_atomic_fetch_inc_relaxed( !! 603         __atomic_pre_full_fence();
1189 #elif defined(arch_atomic_fetch_inc)          !! 604         ret = arch_atomic_fetch_inc_relaxed(v);
1190         return arch_atomic_fetch_inc(v);      !! 605         __atomic_post_full_fence();
1191 #else                                         !! 606         return ret;
1192         return raw_atomic_fetch_add_relaxed(1 << 
1193 #endif                                        << 
1194 }                                                607 }
                                                   >> 608 #define arch_atomic_fetch_inc arch_atomic_fetch_inc
                                                   >> 609 #endif
1195                                                  610 
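
The *_return and fetch_* families above differ only in which value they report: *_return yields the updated value, fetch_* yields the original one, exactly as the Return: lines state. When the architecture provides no inc-specific ops at all, both are derived from the add-based raw ops with a constant of 1. A small self-contained user-space illustration of the return-value difference, using plain C11 atomics rather than the kernel API:

#include <assert.h>
#include <stdatomic.h>

int main(void)
{
	atomic_int v = 41;

	/* fetch_inc-style: returns the value *before* the increment */
	int old = atomic_fetch_add_explicit(&v, 1, memory_order_seq_cst);

	/* inc_return-style: the updated value is the old value plus one */
	int new = atomic_fetch_add_explicit(&v, 1, memory_order_seq_cst) + 1;

	assert(old == 41);
	assert(new == 43);
	assert(atomic_load(&v) == 43);
	return 0;
}
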
1196 /**                                           !! 611 #endif /* arch_atomic_fetch_inc_relaxed */
1197  * raw_atomic_dec() - atomic decrement with r !! 612 
1198  * @v: pointer to atomic_t                    !! 613 #ifndef arch_atomic_dec
1199  *                                            << 
1200  * Atomically updates @v to (@v - 1) with rel << 
1201  *                                            << 
1202  * Safe to use in noinstr code; prefer atomic << 
1203  *                                            << 
1204  * Return: Nothing.                           << 
1205  */                                           << 
1206 static __always_inline void                      614 static __always_inline void
1207 raw_atomic_dec(atomic_t *v)                   !! 615 arch_atomic_dec(atomic_t *v)
1208 {                                                616 {
1209 #if defined(arch_atomic_dec)                  !! 617         arch_atomic_sub(1, v);
1210         arch_atomic_dec(v);                   !! 618 }
1211 #else                                         !! 619 #define arch_atomic_dec arch_atomic_dec
1212         raw_atomic_sub(1, v);                 << 
1213 #endif                                           620 #endif
                                                   >> 621 
                                                   >> 622 #ifndef arch_atomic_dec_return_relaxed
                                                   >> 623 #ifdef arch_atomic_dec_return
                                                   >> 624 #define arch_atomic_dec_return_acquire arch_atomic_dec_return
                                                   >> 625 #define arch_atomic_dec_return_release arch_atomic_dec_return
                                                   >> 626 #define arch_atomic_dec_return_relaxed arch_atomic_dec_return
                                                   >> 627 #endif /* arch_atomic_dec_return */
                                                   >> 628 
                                                   >> 629 #ifndef arch_atomic_dec_return
                                                   >> 630 static __always_inline int
                                                   >> 631 arch_atomic_dec_return(atomic_t *v)
                                                   >> 632 {
                                                   >> 633         return arch_atomic_sub_return(1, v);
1214 }                                                634 }
                                                   >> 635 #define arch_atomic_dec_return arch_atomic_dec_return
                                                   >> 636 #endif
1215                                                  637 
1216 /**                                           !! 638 #ifndef arch_atomic_dec_return_acquire
1217  * raw_atomic_dec_return() - atomic decrement << 
1218  * @v: pointer to atomic_t                    << 
1219  *                                            << 
1220  * Atomically updates @v to (@v - 1) with ful << 
1221  *                                            << 
1222  * Safe to use in noinstr code; prefer atomic << 
1223  *                                            << 
1224  * Return: The updated value of @v.           << 
1225  */                                           << 
1226 static __always_inline int                       639 static __always_inline int
1227 raw_atomic_dec_return(atomic_t *v)            !! 640 arch_atomic_dec_return_acquire(atomic_t *v)
1228 {                                                641 {
1229 #if defined(arch_atomic_dec_return)           !! 642         return arch_atomic_sub_return_acquire(1, v);
1230         return arch_atomic_dec_return(v);     !! 643 }
1231 #elif defined(arch_atomic_dec_return_relaxed) !! 644 #define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
1232         int ret;                              !! 645 #endif
1233         __atomic_pre_full_fence();            !! 646 
1234         ret = arch_atomic_dec_return_relaxed( !! 647 #ifndef arch_atomic_dec_return_release
1235         __atomic_post_full_fence();           !! 648 static __always_inline int
1236         return ret;                           !! 649 arch_atomic_dec_return_release(atomic_t *v)
1237 #else                                         !! 650 {
1238         return raw_atomic_sub_return(1, v);   !! 651         return arch_atomic_sub_return_release(1, v);
                                                   >> 652 }
                                                   >> 653 #define arch_atomic_dec_return_release arch_atomic_dec_return_release
1239 #endif                                           654 #endif
                                                   >> 655 
                                                   >> 656 #ifndef arch_atomic_dec_return_relaxed
                                                   >> 657 static __always_inline int
                                                   >> 658 arch_atomic_dec_return_relaxed(atomic_t *v)
                                                   >> 659 {
                                                   >> 660         return arch_atomic_sub_return_relaxed(1, v);
1240 }                                                661 }
                                                   >> 662 #define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
                                                   >> 663 #endif
1241                                                  664 
1242 /**                                           !! 665 #else /* arch_atomic_dec_return_relaxed */
1243  * raw_atomic_dec_return_acquire() - atomic d !! 666 
1244  * @v: pointer to atomic_t                    !! 667 #ifndef arch_atomic_dec_return_acquire
1245  *                                            << 
1246  * Atomically updates @v to (@v - 1) with acq << 
1247  *                                            << 
1248  * Safe to use in noinstr code; prefer atomic << 
1249  *                                            << 
1250  * Return: The updated value of @v.           << 
1251  */                                           << 
1252 static __always_inline int                       668 static __always_inline int
1253 raw_atomic_dec_return_acquire(atomic_t *v)    !! 669 arch_atomic_dec_return_acquire(atomic_t *v)
1254 {                                                670 {
1255 #if defined(arch_atomic_dec_return_acquire)   << 
1256         return arch_atomic_dec_return_acquire << 
1257 #elif defined(arch_atomic_dec_return_relaxed) << 
1258         int ret = arch_atomic_dec_return_rela    671         int ret = arch_atomic_dec_return_relaxed(v);
1259         __atomic_acquire_fence();                672         __atomic_acquire_fence();
1260         return ret;                              673         return ret;
1261 #elif defined(arch_atomic_dec_return)         << 
1262         return arch_atomic_dec_return(v);     << 
1263 #else                                         << 
1264         return raw_atomic_sub_return_acquire( << 
1265 #endif                                        << 
1266 }                                                674 }
                                                   >> 675 #define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
                                                   >> 676 #endif
1267                                                  677 
1268 /**                                           !! 678 #ifndef arch_atomic_dec_return_release
1269  * raw_atomic_dec_return_release() - atomic d << 
1270  * @v: pointer to atomic_t                    << 
1271  *                                            << 
1272  * Atomically updates @v to (@v - 1) with rel << 
1273  *                                            << 
1274  * Safe to use in noinstr code; prefer atomic << 
1275  *                                            << 
1276  * Return: The updated value of @v.           << 
1277  */                                           << 
1278 static __always_inline int                       679 static __always_inline int
1279 raw_atomic_dec_return_release(atomic_t *v)    !! 680 arch_atomic_dec_return_release(atomic_t *v)
1280 {                                                681 {
1281 #if defined(arch_atomic_dec_return_release)   << 
1282         return arch_atomic_dec_return_release << 
1283 #elif defined(arch_atomic_dec_return_relaxed) << 
1284         __atomic_release_fence();                682         __atomic_release_fence();
1285         return arch_atomic_dec_return_relaxed    683         return arch_atomic_dec_return_relaxed(v);
1286 #elif defined(arch_atomic_dec_return)         << 
1287         return arch_atomic_dec_return(v);     << 
1288 #else                                         << 
1289         return raw_atomic_sub_return_release( << 
1290 #endif                                        << 
1291 }                                                684 }
1292                                               !! 685 #define arch_atomic_dec_return_release arch_atomic_dec_return_release
1293 /**                                           << 
1294  * raw_atomic_dec_return_relaxed() - atomic d << 
1295  * @v: pointer to atomic_t                    << 
1296  *                                            << 
1297  * Atomically updates @v to (@v - 1) with rel << 
1298  *                                            << 
1299  * Safe to use in noinstr code; prefer atomic << 
1300  *                                            << 
1301  * Return: The updated value of @v.           << 
1302  */                                           << 
1303 static __always_inline int                    << 
1304 raw_atomic_dec_return_relaxed(atomic_t *v)    << 
1305 {                                             << 
1306 #if defined(arch_atomic_dec_return_relaxed)   << 
1307         return arch_atomic_dec_return_relaxed << 
1308 #elif defined(arch_atomic_dec_return)         << 
1309         return arch_atomic_dec_return(v);     << 
1310 #else                                         << 
1311         return raw_atomic_sub_return_relaxed( << 
1312 #endif                                           686 #endif
1313 }                                             << 
1314                                                  687 
1315 /**                                           !! 688 #ifndef arch_atomic_dec_return
1316  * raw_atomic_fetch_dec() - atomic decrement  << 
1317  * @v: pointer to atomic_t                    << 
1318  *                                            << 
1319  * Atomically updates @v to (@v - 1) with ful << 
1320  *                                            << 
1321  * Safe to use in noinstr code; prefer atomic << 
1322  *                                            << 
1323  * Return: The original value of @v.          << 
1324  */                                           << 
1325 static __always_inline int                       689 static __always_inline int
1326 raw_atomic_fetch_dec(atomic_t *v)             !! 690 arch_atomic_dec_return(atomic_t *v)
1327 {                                                691 {
1328 #if defined(arch_atomic_fetch_dec)            << 
1329         return arch_atomic_fetch_dec(v);      << 
1330 #elif defined(arch_atomic_fetch_dec_relaxed)  << 
1331         int ret;                                 692         int ret;
1332         __atomic_pre_full_fence();               693         __atomic_pre_full_fence();
1333         ret = arch_atomic_fetch_dec_relaxed(v !! 694         ret = arch_atomic_dec_return_relaxed(v);
1334         __atomic_post_full_fence();              695         __atomic_post_full_fence();
1335         return ret;                              696         return ret;
1336 #else                                         << 
1337         return raw_atomic_fetch_sub(1, v);    << 
1338 #endif                                        << 
1339 }                                                697 }
                                                   >> 698 #define arch_atomic_dec_return arch_atomic_dec_return
                                                   >> 699 #endif
1340                                                  700 
1341 /**                                           !! 701 #endif /* arch_atomic_dec_return_relaxed */
1342  * raw_atomic_fetch_dec_acquire() - atomic de !! 702 
1343  * @v: pointer to atomic_t                    !! 703 #ifndef arch_atomic_fetch_dec_relaxed
1344  *                                            !! 704 #ifdef arch_atomic_fetch_dec
1345  * Atomically updates @v to (@v - 1) with acq !! 705 #define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec
1346  *                                            !! 706 #define arch_atomic_fetch_dec_release arch_atomic_fetch_dec
1347  * Safe to use in noinstr code; prefer atomic !! 707 #define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec
1348  *                                            !! 708 #endif /* arch_atomic_fetch_dec */
1349  * Return: The original value of @v.          !! 709 
1350  */                                           !! 710 #ifndef arch_atomic_fetch_dec
1351 static __always_inline int                       711 static __always_inline int
1352 raw_atomic_fetch_dec_acquire(atomic_t *v)     !! 712 arch_atomic_fetch_dec(atomic_t *v)
1353 {                                                713 {
1354 #if defined(arch_atomic_fetch_dec_acquire)    !! 714         return arch_atomic_fetch_sub(1, v);
1355         return arch_atomic_fetch_dec_acquire( << 
1356 #elif defined(arch_atomic_fetch_dec_relaxed)  << 
1357         int ret = arch_atomic_fetch_dec_relax << 
1358         __atomic_acquire_fence();             << 
1359         return ret;                           << 
1360 #elif defined(arch_atomic_fetch_dec)          << 
1361         return arch_atomic_fetch_dec(v);      << 
1362 #else                                         << 
1363         return raw_atomic_fetch_sub_acquire(1 << 
1364 #endif                                        << 
1365 }                                                715 }
                                                   >> 716 #define arch_atomic_fetch_dec arch_atomic_fetch_dec
                                                   >> 717 #endif
1366                                                  718 
1367 /**                                           !! 719 #ifndef arch_atomic_fetch_dec_acquire
1368  * raw_atomic_fetch_dec_release() - atomic de << 
1369  * @v: pointer to atomic_t                    << 
1370  *                                            << 
1371  * Atomically updates @v to (@v - 1) with rel << 
1372  *                                            << 
1373  * Safe to use in noinstr code; prefer atomic << 
1374  *                                            << 
1375  * Return: The original value of @v.          << 
1376  */                                           << 
1377 static __always_inline int                       720 static __always_inline int
1378 raw_atomic_fetch_dec_release(atomic_t *v)     !! 721 arch_atomic_fetch_dec_acquire(atomic_t *v)
1379 {                                                722 {
1380 #if defined(arch_atomic_fetch_dec_release)    !! 723         return arch_atomic_fetch_sub_acquire(1, v);
1381         return arch_atomic_fetch_dec_release( !! 724 }
1382 #elif defined(arch_atomic_fetch_dec_relaxed)  !! 725 #define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
1383         __atomic_release_fence();             << 
1384         return arch_atomic_fetch_dec_relaxed( << 
1385 #elif defined(arch_atomic_fetch_dec)          << 
1386         return arch_atomic_fetch_dec(v);      << 
1387 #else                                         << 
1388         return raw_atomic_fetch_sub_release(1 << 
1389 #endif                                           726 #endif
                                                   >> 727 
                                                   >> 728 #ifndef arch_atomic_fetch_dec_release
                                                   >> 729 static __always_inline int
                                                   >> 730 arch_atomic_fetch_dec_release(atomic_t *v)
                                                   >> 731 {
                                                   >> 732         return arch_atomic_fetch_sub_release(1, v);
1390 }                                                733 }
                                                   >> 734 #define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
                                                   >> 735 #endif
1391                                                  736 
1392 /**                                           !! 737 #ifndef arch_atomic_fetch_dec_relaxed
1393  * raw_atomic_fetch_dec_relaxed() - atomic de << 
1394  * @v: pointer to atomic_t                    << 
1395  *                                            << 
1396  * Atomically updates @v to (@v - 1) with rel << 
1397  *                                            << 
1398  * Safe to use in noinstr code; prefer atomic << 
1399  *                                            << 
1400  * Return: The original value of @v.          << 
1401  */                                           << 
1402 static __always_inline int                       738 static __always_inline int
1403 raw_atomic_fetch_dec_relaxed(atomic_t *v)     !! 739 arch_atomic_fetch_dec_relaxed(atomic_t *v)
1404 {                                                740 {
1405 #if defined(arch_atomic_fetch_dec_relaxed)    !! 741         return arch_atomic_fetch_sub_relaxed(1, v);
1406         return arch_atomic_fetch_dec_relaxed( !! 742 }
1407 #elif defined(arch_atomic_fetch_dec)          !! 743 #define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed
1408         return arch_atomic_fetch_dec(v);      << 
1409 #else                                         << 
1410         return raw_atomic_fetch_sub_relaxed(1 << 
1411 #endif                                           744 #endif
                                                   >> 745 
                                                   >> 746 #else /* arch_atomic_fetch_dec_relaxed */
                                                   >> 747 
                                                   >> 748 #ifndef arch_atomic_fetch_dec_acquire
                                                   >> 749 static __always_inline int
                                                   >> 750 arch_atomic_fetch_dec_acquire(atomic_t *v)
                                                   >> 751 {
                                                   >> 752         int ret = arch_atomic_fetch_dec_relaxed(v);
                                                   >> 753         __atomic_acquire_fence();
                                                   >> 754         return ret;
1412 }                                                755 }
                                                   >> 756 #define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
                                                   >> 757 #endif
1413                                                  758 
1414 /**                                           !! 759 #ifndef arch_atomic_fetch_dec_release
1415  * raw_atomic_and() - atomic bitwise AND with !! 760 static __always_inline int
1416  * @i: int value                              !! 761 arch_atomic_fetch_dec_release(atomic_t *v)
1417  * @v: pointer to atomic_t                    << 
1418  *                                            << 
1419  * Atomically updates @v to (@v & @i) with re << 
1420  *                                            << 
1421  * Safe to use in noinstr code; prefer atomic << 
1422  *                                            << 
1423  * Return: Nothing.                           << 
1424  */                                           << 
1425 static __always_inline void                   << 
1426 raw_atomic_and(int i, atomic_t *v)            << 
1427 {                                                762 {
1428         arch_atomic_and(i, v);                !! 763         __atomic_release_fence();
                                                   >> 764         return arch_atomic_fetch_dec_relaxed(v);
1429 }                                                765 }
                                                   >> 766 #define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
                                                   >> 767 #endif
1430                                                  768 
1431 /**                                           !! 769 #ifndef arch_atomic_fetch_dec
1432  * raw_atomic_fetch_and() - atomic bitwise AN << 
1433  * @i: int value                              << 
1434  * @v: pointer to atomic_t                    << 
1435  *                                            << 
1436  * Atomically updates @v to (@v & @i) with fu << 
1437  *                                            << 
1438  * Safe to use in noinstr code; prefer atomic << 
1439  *                                            << 
1440  * Return: The original value of @v.          << 
1441  */                                           << 
1442 static __always_inline int                       770 static __always_inline int
1443 raw_atomic_fetch_and(int i, atomic_t *v)      !! 771 arch_atomic_fetch_dec(atomic_t *v)
1444 {                                                772 {
1445 #if defined(arch_atomic_fetch_and)            << 
1446         return arch_atomic_fetch_and(i, v);   << 
1447 #elif defined(arch_atomic_fetch_and_relaxed)  << 
1448         int ret;                                 773         int ret;
1449         __atomic_pre_full_fence();               774         __atomic_pre_full_fence();
1450         ret = arch_atomic_fetch_and_relaxed(i !! 775         ret = arch_atomic_fetch_dec_relaxed(v);
1451         __atomic_post_full_fence();              776         __atomic_post_full_fence();
1452         return ret;                              777         return ret;
1453 #else                                         << 
1454 #error "Unable to define raw_atomic_fetch_and << 
1455 #endif                                        << 
1456 }                                                778 }
                                                   >> 779 #define arch_atomic_fetch_dec arch_atomic_fetch_dec
                                                   >> 780 #endif
1457                                                  781 
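Illustrative sketch, not part of the generated header: raw_atomic_fetch_and() applies the mask and hands back the pre-update value, so a caller can see which bits were present before the AND. The names below (example_mask_flags(), EXAMPLE_KEEP_MASK) are hypothetical.

#define EXAMPLE_KEEP_MASK	0x0000ffff	/* hypothetical: bits to preserve */

/* Clear everything outside EXAMPLE_KEEP_MASK; return the bits that were lost. */
static inline int example_mask_flags(atomic_t *flags)
{
	int old = raw_atomic_fetch_and(EXAMPLE_KEEP_MASK, flags);

	return old & ~EXAMPLE_KEEP_MASK;
}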
1458 /**                                           !! 782 #endif /* arch_atomic_fetch_dec_relaxed */
1459  * raw_atomic_fetch_and_acquire() - atomic bi !! 783 
1460  * @i: int value                              !! 784 #ifndef arch_atomic_fetch_and_relaxed
1461  * @v: pointer to atomic_t                    !! 785 #define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
1462  *                                            !! 786 #define arch_atomic_fetch_and_release arch_atomic_fetch_and
1463  * Atomically updates @v to (@v & @i) with ac !! 787 #define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and
1464  *                                            !! 788 #else /* arch_atomic_fetch_and_relaxed */
1465  * Safe to use in noinstr code; prefer atomic !! 789 
1466  *                                            !! 790 #ifndef arch_atomic_fetch_and_acquire
1467  * Return: The original value of @v.          << 
1468  */                                           << 
1469 static __always_inline int                       791 static __always_inline int
1470 raw_atomic_fetch_and_acquire(int i, atomic_t  !! 792 arch_atomic_fetch_and_acquire(int i, atomic_t *v)
1471 {                                                793 {
1472 #if defined(arch_atomic_fetch_and_acquire)    << 
1473         return arch_atomic_fetch_and_acquire( << 
1474 #elif defined(arch_atomic_fetch_and_relaxed)  << 
1475         int ret = arch_atomic_fetch_and_relax    794         int ret = arch_atomic_fetch_and_relaxed(i, v);
1476         __atomic_acquire_fence();                795         __atomic_acquire_fence();
1477         return ret;                              796         return ret;
1478 #elif defined(arch_atomic_fetch_and)          << 
1479         return arch_atomic_fetch_and(i, v);   << 
1480 #else                                         << 
1481 #error "Unable to define raw_atomic_fetch_and << 
1482 #endif                                        << 
1483 }                                                797 }
                                                   >> 798 #define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
                                                   >> 799 #endif
1484                                                  800 
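Illustrative sketch, not part of the generated header: an acquire fetch is typically used when the updated value guards later reads, since loads and stores issued after the atomic op cannot be reordered before it. Here a hypothetical EXAMPLE_DATA_READY bit is consumed with acquire ordering before the payload it protects is read; all names are made up for illustration.

#define EXAMPLE_DATA_READY	0x1	/* hypothetical flag bit */

/* Consume the ready bit; payload reads after this call stay after it. */
static inline bool example_consume_ready(atomic_t *state)
{
	int old = raw_atomic_fetch_and_acquire(~EXAMPLE_DATA_READY, state);

	return old & EXAMPLE_DATA_READY;
}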
1485 /**                                           !! 801 #ifndef arch_atomic_fetch_and_release
1486  * raw_atomic_fetch_and_release() - atomic bi << 
1487  * @i: int value                              << 
1488  * @v: pointer to atomic_t                    << 
1489  *                                            << 
1490  * Atomically updates @v to (@v & @i) with re << 
1491  *                                            << 
1492  * Safe to use in noinstr code; prefer atomic << 
1493  *                                            << 
1494  * Return: The original value of @v.          << 
1495  */                                           << 
1496 static __always_inline int                       802 static __always_inline int
1497 raw_atomic_fetch_and_release(int i, atomic_t  !! 803 arch_atomic_fetch_and_release(int i, atomic_t *v)
1498 {                                                804 {
1499 #if defined(arch_atomic_fetch_and_release)    << 
1500         return arch_atomic_fetch_and_release( << 
1501 #elif defined(arch_atomic_fetch_and_relaxed)  << 
1502         __atomic_release_fence();                805         __atomic_release_fence();
1503         return arch_atomic_fetch_and_relaxed(    806         return arch_atomic_fetch_and_relaxed(i, v);
1504 #elif defined(arch_atomic_fetch_and)          << 
1505         return arch_atomic_fetch_and(i, v);   << 
1506 #else                                         << 
1507 #error "Unable to define raw_atomic_fetch_and << 
1508 #endif                                        << 
1509 }                                                807 }
                                                   >> 808 #define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
                                                   >> 809 #endif
1510                                                  810 
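Illustrative sketch, not part of the generated header: the release variant is the mirror image, ordering earlier stores before the atomic update. A hypothetical owner might finish writing a protected structure and then drop a busy bit with release semantics; EXAMPLE_BUSY and example_unlock_busy() are invented names.

#define EXAMPLE_BUSY	0x2	/* hypothetical flag bit */

/* All stores issued before this call are visible before the bit clears. */
static inline void example_unlock_busy(atomic_t *state)
{
	raw_atomic_fetch_and_release(~EXAMPLE_BUSY, state);
}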
1511 /**                                           !! 811 #ifndef arch_atomic_fetch_and
1512  * raw_atomic_fetch_and_relaxed() - atomic bi << 
1513  * @i: int value                              << 
1514  * @v: pointer to atomic_t                    << 
1515  *                                            << 
1516  * Atomically updates @v to (@v & @i) with re << 
1517  *                                            << 
1518  * Safe to use in noinstr code; prefer atomic << 
1519  *                                            << 
1520  * Return: The original value of @v.          << 
1521  */                                           << 
1522 static __always_inline int                       812 static __always_inline int
1523 raw_atomic_fetch_and_relaxed(int i, atomic_t  !! 813 arch_atomic_fetch_and(int i, atomic_t *v)
1524 {                                                814 {
1525 #if defined(arch_atomic_fetch_and_relaxed)    !! 815         int ret;
1526         return arch_atomic_fetch_and_relaxed( !! 816         __atomic_pre_full_fence();
1527 #elif defined(arch_atomic_fetch_and)          !! 817         ret = arch_atomic_fetch_and_relaxed(i, v);
1528         return arch_atomic_fetch_and(i, v);   !! 818         __atomic_post_full_fence();
1529 #else                                         !! 819         return ret;
1530 #error "Unable to define raw_atomic_fetch_and << 
1531 #endif                                        << 
1532 }                                                820 }
                                                   >> 821 #define arch_atomic_fetch_and arch_atomic_fetch_and
                                                   >> 822 #endif
1533                                                  823 
1534 /**                                           !! 824 #endif /* arch_atomic_fetch_and_relaxed */
1535  * raw_atomic_andnot() - atomic bitwise AND N !! 825 
1536  * @i: int value                              !! 826 #ifndef arch_atomic_andnot
1537  * @v: pointer to atomic_t                    << 
1538  *                                            << 
1539  * Atomically updates @v to (@v & ~@i) with r << 
1540  *                                            << 
1541  * Safe to use in noinstr code; prefer atomic << 
1542  *                                            << 
1543  * Return: Nothing.                           << 
1544  */                                           << 
1545 static __always_inline void                      827 static __always_inline void
1546 raw_atomic_andnot(int i, atomic_t *v)         !! 828 arch_atomic_andnot(int i, atomic_t *v)
1547 {                                                829 {
1548 #if defined(arch_atomic_andnot)               !! 830         arch_atomic_and(~i, v);
1549         arch_atomic_andnot(i, v);             << 
1550 #else                                         << 
1551         raw_atomic_and(~i, v);                << 
1552 #endif                                        << 
1553 }                                                831 }
                                                   >> 832 #define arch_atomic_andnot arch_atomic_andnot
                                                   >> 833 #endif
1554                                                  834 
1555 /**                                           !! 835 #ifndef arch_atomic_fetch_andnot_relaxed
1556  * raw_atomic_fetch_andnot() - atomic bitwise !! 836 #ifdef arch_atomic_fetch_andnot
1557  * @i: int value                              !! 837 #define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
1558  * @v: pointer to atomic_t                    !! 838 #define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
1559  *                                            !! 839 #define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
1560  * Atomically updates @v to (@v & ~@i) with f !! 840 #endif /* arch_atomic_fetch_andnot */
1561  *                                            !! 841 
1562  * Safe to use in noinstr code; prefer atomic !! 842 #ifndef arch_atomic_fetch_andnot
1563  *                                            << 
1564  * Return: The original value of @v.          << 
1565  */                                           << 
1566 static __always_inline int                       843 static __always_inline int
1567 raw_atomic_fetch_andnot(int i, atomic_t *v)   !! 844 arch_atomic_fetch_andnot(int i, atomic_t *v)
1568 {                                                845 {
1569 #if defined(arch_atomic_fetch_andnot)         !! 846         return arch_atomic_fetch_and(~i, v);
1570         return arch_atomic_fetch_andnot(i, v) << 
1571 #elif defined(arch_atomic_fetch_andnot_relaxe << 
1572         int ret;                              << 
1573         __atomic_pre_full_fence();            << 
1574         ret = arch_atomic_fetch_andnot_relaxe << 
1575         __atomic_post_full_fence();           << 
1576         return ret;                           << 
1577 #else                                         << 
1578         return raw_atomic_fetch_and(~i, v);   << 
1579 #endif                                        << 
1580 }                                                847 }
                                                   >> 848 #define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
                                                   >> 849 #endif
1581                                                  850 
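Illustrative sketch, not part of the generated header: andnot is simply AND with the complemented mask, so a common use is clearing one flag and testing whether it had been set. example_test_and_clear_flag() is a hypothetical helper; @nr must be below 32 for an int-sized atomic_t.

/* Clear flag bit @nr and report whether it was previously set. */
static inline bool example_test_and_clear_flag(atomic_t *flags, unsigned int nr)
{
	return raw_atomic_fetch_andnot(1U << nr, flags) & (1U << nr);
}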
1582 /**                                           !! 851 #ifndef arch_atomic_fetch_andnot_acquire
1583  * raw_atomic_fetch_andnot_acquire() - atomic << 
1584  * @i: int value                              << 
1585  * @v: pointer to atomic_t                    << 
1586  *                                            << 
1587  * Atomically updates @v to (@v & ~@i) with a << 
1588  *                                            << 
1589  * Safe to use in noinstr code; prefer atomic << 
1590  *                                            << 
1591  * Return: The original value of @v.          << 
1592  */                                           << 
1593 static __always_inline int                       852 static __always_inline int
1594 raw_atomic_fetch_andnot_acquire(int i, atomic !! 853 arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
1595 {                                                854 {
1596 #if defined(arch_atomic_fetch_andnot_acquire) !! 855         return arch_atomic_fetch_and_acquire(~i, v);
1597         return arch_atomic_fetch_andnot_acqui << 
1598 #elif defined(arch_atomic_fetch_andnot_relaxe << 
1599         int ret = arch_atomic_fetch_andnot_re << 
1600         __atomic_acquire_fence();             << 
1601         return ret;                           << 
1602 #elif defined(arch_atomic_fetch_andnot)       << 
1603         return arch_atomic_fetch_andnot(i, v) << 
1604 #else                                         << 
1605         return raw_atomic_fetch_and_acquire(~ << 
1606 #endif                                        << 
1607 }                                                856 }
                                                   >> 857 #define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
                                                   >> 858 #endif
1608                                                  859 
1609 /**                                           !! 860 #ifndef arch_atomic_fetch_andnot_release
1610  * raw_atomic_fetch_andnot_release() - atomic << 
1611  * @i: int value                              << 
1612  * @v: pointer to atomic_t                    << 
1613  *                                            << 
1614  * Atomically updates @v to (@v & ~@i) with r << 
1615  *                                            << 
1616  * Safe to use in noinstr code; prefer atomic << 
1617  *                                            << 
1618  * Return: The original value of @v.          << 
1619  */                                           << 
1620 static __always_inline int                       861 static __always_inline int
1621 raw_atomic_fetch_andnot_release(int i, atomic !! 862 arch_atomic_fetch_andnot_release(int i, atomic_t *v)
1622 {                                                863 {
1623 #if defined(arch_atomic_fetch_andnot_release) !! 864         return arch_atomic_fetch_and_release(~i, v);
1624         return arch_atomic_fetch_andnot_relea << 
1625 #elif defined(arch_atomic_fetch_andnot_relaxe << 
1626         __atomic_release_fence();             << 
1627         return arch_atomic_fetch_andnot_relax << 
1628 #elif defined(arch_atomic_fetch_andnot)       << 
1629         return arch_atomic_fetch_andnot(i, v) << 
1630 #else                                         << 
1631         return raw_atomic_fetch_and_release(~ << 
1632 #endif                                        << 
1633 }                                                865 }
                                                   >> 866 #define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
                                                   >> 867 #endif
1634                                                  868 
1635 /**                                           !! 869 #ifndef arch_atomic_fetch_andnot_relaxed
1636  * raw_atomic_fetch_andnot_relaxed() - atomic << 
1637  * @i: int value                              << 
1638  * @v: pointer to atomic_t                    << 
1639  *                                            << 
1640  * Atomically updates @v to (@v & ~@i) with r << 
1641  *                                            << 
1642  * Safe to use in noinstr code; prefer atomic << 
1643  *                                            << 
1644  * Return: The original value of @v.          << 
1645  */                                           << 
1646 static __always_inline int                       870 static __always_inline int
1647 raw_atomic_fetch_andnot_relaxed(int i, atomic !! 871 arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
1648 {                                                872 {
1649 #if defined(arch_atomic_fetch_andnot_relaxed) !! 873         return arch_atomic_fetch_and_relaxed(~i, v);
1650         return arch_atomic_fetch_andnot_relax !! 874 }
1651 #elif defined(arch_atomic_fetch_andnot)       !! 875 #define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
1652         return arch_atomic_fetch_andnot(i, v) << 
1653 #else                                         << 
1654         return raw_atomic_fetch_and_relaxed(~ << 
1655 #endif                                           876 #endif
                                                   >> 877 
                                                   >> 878 #else /* arch_atomic_fetch_andnot_relaxed */
                                                   >> 879 
                                                   >> 880 #ifndef arch_atomic_fetch_andnot_acquire
                                                   >> 881 static __always_inline int
                                                   >> 882 arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
                                                   >> 883 {
                                                   >> 884         int ret = arch_atomic_fetch_andnot_relaxed(i, v);
                                                   >> 885         __atomic_acquire_fence();
                                                   >> 886         return ret;
1656 }                                                887 }
                                                   >> 888 #define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
                                                   >> 889 #endif
1657                                                  890 
1658 /**                                           !! 891 #ifndef arch_atomic_fetch_andnot_release
1659  * raw_atomic_or() - atomic bitwise OR with r !! 892 static __always_inline int
1660  * @i: int value                              !! 893 arch_atomic_fetch_andnot_release(int i, atomic_t *v)
1661  * @v: pointer to atomic_t                    << 
1662  *                                            << 
1663  * Atomically updates @v to (@v | @i) with re << 
1664  *                                            << 
1665  * Safe to use in noinstr code; prefer atomic << 
1666  *                                            << 
1667  * Return: Nothing.                           << 
1668  */                                           << 
1669 static __always_inline void                   << 
1670 raw_atomic_or(int i, atomic_t *v)             << 
1671 {                                                894 {
1672         arch_atomic_or(i, v);                 !! 895         __atomic_release_fence();
                                                   >> 896         return arch_atomic_fetch_andnot_relaxed(i, v);
1673 }                                                897 }
                                                   >> 898 #define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
                                                   >> 899 #endif
1674                                                  900 
1675 /**                                           !! 901 #ifndef arch_atomic_fetch_andnot
1676  * raw_atomic_fetch_or() - atomic bitwise OR  << 
1677  * @i: int value                              << 
1678  * @v: pointer to atomic_t                    << 
1679  *                                            << 
1680  * Atomically updates @v to (@v | @i) with fu << 
1681  *                                            << 
1682  * Safe to use in noinstr code; prefer atomic << 
1683  *                                            << 
1684  * Return: The original value of @v.          << 
1685  */                                           << 
1686 static __always_inline int                       902 static __always_inline int
1687 raw_atomic_fetch_or(int i, atomic_t *v)       !! 903 arch_atomic_fetch_andnot(int i, atomic_t *v)
1688 {                                                904 {
1689 #if defined(arch_atomic_fetch_or)             << 
1690         return arch_atomic_fetch_or(i, v);    << 
1691 #elif defined(arch_atomic_fetch_or_relaxed)   << 
1692         int ret;                                 905         int ret;
1693         __atomic_pre_full_fence();               906         __atomic_pre_full_fence();
1694         ret = arch_atomic_fetch_or_relaxed(i, !! 907         ret = arch_atomic_fetch_andnot_relaxed(i, v);
1695         __atomic_post_full_fence();              908         __atomic_post_full_fence();
1696         return ret;                              909         return ret;
1697 #else                                         << 
1698 #error "Unable to define raw_atomic_fetch_or" << 
1699 #endif                                        << 
1700 }                                                910 }
                                                   >> 911 #define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
                                                   >> 912 #endif
1701                                                  913 
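Illustrative sketch, not part of the generated header: fetch_or with full ordering is the usual building block for a one-shot "claim" flag, because the returned old value tells the caller whether someone else got there first. EXAMPLE_CLAIMED and example_try_claim() are hypothetical.

#define EXAMPLE_CLAIMED	0x1	/* hypothetical flag bit */

/* Returns true only for the single caller that set the bit first. */
static inline bool example_try_claim(atomic_t *state)
{
	return !(raw_atomic_fetch_or(EXAMPLE_CLAIMED, state) & EXAMPLE_CLAIMED);
}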
1702 /**                                           !! 914 #endif /* arch_atomic_fetch_andnot_relaxed */
1703  * raw_atomic_fetch_or_acquire() - atomic bit !! 915 
1704  * @i: int value                              !! 916 #ifndef arch_atomic_fetch_or_relaxed
1705  * @v: pointer to atomic_t                    !! 917 #define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
1706  *                                            !! 918 #define arch_atomic_fetch_or_release arch_atomic_fetch_or
1707  * Atomically updates @v to (@v | @i) with ac !! 919 #define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or
1708  *                                            !! 920 #else /* arch_atomic_fetch_or_relaxed */
1709  * Safe to use in noinstr code; prefer atomic !! 921 
1710  *                                            !! 922 #ifndef arch_atomic_fetch_or_acquire
1711  * Return: The original value of @v.          << 
1712  */                                           << 
1713 static __always_inline int                       923 static __always_inline int
1714 raw_atomic_fetch_or_acquire(int i, atomic_t * !! 924 arch_atomic_fetch_or_acquire(int i, atomic_t *v)
1715 {                                                925 {
1716 #if defined(arch_atomic_fetch_or_acquire)     << 
1717         return arch_atomic_fetch_or_acquire(i << 
1718 #elif defined(arch_atomic_fetch_or_relaxed)   << 
1719         int ret = arch_atomic_fetch_or_relaxe    926         int ret = arch_atomic_fetch_or_relaxed(i, v);
1720         __atomic_acquire_fence();                927         __atomic_acquire_fence();
1721         return ret;                              928         return ret;
1722 #elif defined(arch_atomic_fetch_or)           << 
1723         return arch_atomic_fetch_or(i, v);    << 
1724 #else                                         << 
1725 #error "Unable to define raw_atomic_fetch_or_ << 
1726 #endif                                        << 
1727 }                                                929 }
                                                   >> 930 #define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
                                                   >> 931 #endif
1728                                                  932 
1729 /**                                           !! 933 #ifndef arch_atomic_fetch_or_release
1730  * raw_atomic_fetch_or_release() - atomic bit << 
1731  * @i: int value                              << 
1732  * @v: pointer to atomic_t                    << 
1733  *                                            << 
1734  * Atomically updates @v to (@v | @i) with re << 
1735  *                                            << 
1736  * Safe to use in noinstr code; prefer atomic << 
1737  *                                            << 
1738  * Return: The original value of @v.          << 
1739  */                                           << 
1740 static __always_inline int                       934 static __always_inline int
1741 raw_atomic_fetch_or_release(int i, atomic_t * !! 935 arch_atomic_fetch_or_release(int i, atomic_t *v)
1742 {                                                936 {
1743 #if defined(arch_atomic_fetch_or_release)     << 
1744         return arch_atomic_fetch_or_release(i << 
1745 #elif defined(arch_atomic_fetch_or_relaxed)   << 
1746         __atomic_release_fence();                937         __atomic_release_fence();
1747         return arch_atomic_fetch_or_relaxed(i    938         return arch_atomic_fetch_or_relaxed(i, v);
1748 #elif defined(arch_atomic_fetch_or)           << 
1749         return arch_atomic_fetch_or(i, v);    << 
1750 #else                                         << 
1751 #error "Unable to define raw_atomic_fetch_or_ << 
1752 #endif                                        << 
1753 }                                                939 }
1754                                               !! 940 #define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
1755 /**                                           << 
1756  * raw_atomic_fetch_or_relaxed() - atomic bit << 
1757  * @i: int value                              << 
1758  * @v: pointer to atomic_t                    << 
1759  *                                            << 
1760  * Atomically updates @v to (@v | @i) with re << 
1761  *                                            << 
1762  * Safe to use in noinstr code; prefer atomic << 
1763  *                                            << 
1764  * Return: The original value of @v.          << 
1765  */                                           << 
1766 static __always_inline int                    << 
1767 raw_atomic_fetch_or_relaxed(int i, atomic_t * << 
1768 {                                             << 
1769 #if defined(arch_atomic_fetch_or_relaxed)     << 
1770         return arch_atomic_fetch_or_relaxed(i << 
1771 #elif defined(arch_atomic_fetch_or)           << 
1772         return arch_atomic_fetch_or(i, v);    << 
1773 #else                                         << 
1774 #error "Unable to define raw_atomic_fetch_or_ << 
1775 #endif                                           941 #endif
1776 }                                             << 
1777                                                  942 
1778 /**                                           !! 943 #ifndef arch_atomic_fetch_or
1779  * raw_atomic_xor() - atomic bitwise XOR with << 
1780  * @i: int value                              << 
1781  * @v: pointer to atomic_t                    << 
1782  *                                            << 
1783  * Atomically updates @v to (@v ^ @i) with re << 
1784  *                                            << 
1785  * Safe to use in noinstr code; prefer atomic << 
1786  *                                            << 
1787  * Return: Nothing.                           << 
1788  */                                           << 
1789 static __always_inline void                   << 
1790 raw_atomic_xor(int i, atomic_t *v)            << 
1791 {                                             << 
1792         arch_atomic_xor(i, v);                << 
1793 }                                             << 
1794                                               << 
1795 /**                                           << 
1796  * raw_atomic_fetch_xor() - atomic bitwise XO << 
1797  * @i: int value                              << 
1798  * @v: pointer to atomic_t                    << 
1799  *                                            << 
1800  * Atomically updates @v to (@v ^ @i) with fu << 
1801  *                                            << 
1802  * Safe to use in noinstr code; prefer atomic << 
1803  *                                            << 
1804  * Return: The original value of @v.          << 
1805  */                                           << 
1806 static __always_inline int                       944 static __always_inline int
1807 raw_atomic_fetch_xor(int i, atomic_t *v)      !! 945 arch_atomic_fetch_or(int i, atomic_t *v)
1808 {                                                946 {
1809 #if defined(arch_atomic_fetch_xor)            << 
1810         return arch_atomic_fetch_xor(i, v);   << 
1811 #elif defined(arch_atomic_fetch_xor_relaxed)  << 
1812         int ret;                                 947         int ret;
1813         __atomic_pre_full_fence();               948         __atomic_pre_full_fence();
1814         ret = arch_atomic_fetch_xor_relaxed(i !! 949         ret = arch_atomic_fetch_or_relaxed(i, v);
1815         __atomic_post_full_fence();              950         __atomic_post_full_fence();
1816         return ret;                              951         return ret;
1817 #else                                         << 
1818 #error "Unable to define raw_atomic_fetch_xor << 
1819 #endif                                        << 
1820 }                                                952 }
                                                   >> 953 #define arch_atomic_fetch_or arch_atomic_fetch_or
                                                   >> 954 #endif
1821                                                  955 
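Illustrative sketch, not part of the generated header: fetch_xor toggles the given bits and returns the previous value, so the new state of a toggled bit is the inverse of the corresponding returned bit. EXAMPLE_LED_ON and example_toggle_led() are invented names.

#define EXAMPLE_LED_ON	0x1	/* hypothetical flag bit */

/* Toggle the bit; return true if it is set after the toggle. */
static inline bool example_toggle_led(atomic_t *state)
{
	return !(raw_atomic_fetch_xor(EXAMPLE_LED_ON, state) & EXAMPLE_LED_ON);
}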
1822 /**                                           !! 956 #endif /* arch_atomic_fetch_or_relaxed */
1823  * raw_atomic_fetch_xor_acquire() - atomic bi !! 957 
1824  * @i: int value                              !! 958 #ifndef arch_atomic_fetch_xor_relaxed
1825  * @v: pointer to atomic_t                    !! 959 #define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
1826  *                                            !! 960 #define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
1827  * Atomically updates @v to (@v ^ @i) with ac !! 961 #define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor
1828  *                                            !! 962 #else /* arch_atomic_fetch_xor_relaxed */
1829  * Safe to use in noinstr code; prefer atomic !! 963 
1830  *                                            !! 964 #ifndef arch_atomic_fetch_xor_acquire
1831  * Return: The original value of @v.          << 
1832  */                                           << 
1833 static __always_inline int                       965 static __always_inline int
1834 raw_atomic_fetch_xor_acquire(int i, atomic_t  !! 966 arch_atomic_fetch_xor_acquire(int i, atomic_t *v)
1835 {                                                967 {
1836 #if defined(arch_atomic_fetch_xor_acquire)    << 
1837         return arch_atomic_fetch_xor_acquire( << 
1838 #elif defined(arch_atomic_fetch_xor_relaxed)  << 
1839         int ret = arch_atomic_fetch_xor_relax    968         int ret = arch_atomic_fetch_xor_relaxed(i, v);
1840         __atomic_acquire_fence();                969         __atomic_acquire_fence();
1841         return ret;                              970         return ret;
1842 #elif defined(arch_atomic_fetch_xor)          << 
1843         return arch_atomic_fetch_xor(i, v);   << 
1844 #else                                         << 
1845 #error "Unable to define raw_atomic_fetch_xor << 
1846 #endif                                        << 
1847 }                                                971 }
                                                   >> 972 #define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
                                                   >> 973 #endif
1848                                                  974 
1849 /**                                           !! 975 #ifndef arch_atomic_fetch_xor_release
1850  * raw_atomic_fetch_xor_release() - atomic bi << 
1851  * @i: int value                              << 
1852  * @v: pointer to atomic_t                    << 
1853  *                                            << 
1854  * Atomically updates @v to (@v ^ @i) with re << 
1855  *                                            << 
1856  * Safe to use in noinstr code; prefer atomic << 
1857  *                                            << 
1858  * Return: The original value of @v.          << 
1859  */                                           << 
1860 static __always_inline int                       976 static __always_inline int
1861 raw_atomic_fetch_xor_release(int i, atomic_t  !! 977 arch_atomic_fetch_xor_release(int i, atomic_t *v)
1862 {                                                978 {
1863 #if defined(arch_atomic_fetch_xor_release)    << 
1864         return arch_atomic_fetch_xor_release( << 
1865 #elif defined(arch_atomic_fetch_xor_relaxed)  << 
1866         __atomic_release_fence();                979         __atomic_release_fence();
1867         return arch_atomic_fetch_xor_relaxed(    980         return arch_atomic_fetch_xor_relaxed(i, v);
1868 #elif defined(arch_atomic_fetch_xor)          << 
1869         return arch_atomic_fetch_xor(i, v);   << 
1870 #else                                         << 
1871 #error "Unable to define raw_atomic_fetch_xor << 
1872 #endif                                        << 
1873 }                                                981 }
1874                                               !! 982 #define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
1875 /**                                           << 
1876  * raw_atomic_fetch_xor_relaxed() - atomic bi << 
1877  * @i: int value                              << 
1878  * @v: pointer to atomic_t                    << 
1879  *                                            << 
1880  * Atomically updates @v to (@v ^ @i) with re << 
1881  *                                            << 
1882  * Safe to use in noinstr code; prefer atomic << 
1883  *                                            << 
1884  * Return: The original value of @v.          << 
1885  */                                           << 
1886 static __always_inline int                    << 
1887 raw_atomic_fetch_xor_relaxed(int i, atomic_t  << 
1888 {                                             << 
1889 #if defined(arch_atomic_fetch_xor_relaxed)    << 
1890         return arch_atomic_fetch_xor_relaxed( << 
1891 #elif defined(arch_atomic_fetch_xor)          << 
1892         return arch_atomic_fetch_xor(i, v);   << 
1893 #else                                         << 
1894 #error "Unable to define raw_atomic_fetch_xor << 
1895 #endif                                           983 #endif
1896 }                                             << 
1897                                                  984 
1898 /**                                           !! 985 #ifndef arch_atomic_fetch_xor
1899  * raw_atomic_xchg() - atomic exchange with f << 
1900  * @v: pointer to atomic_t                    << 
1901  * @new: int value to assign                  << 
1902  *                                            << 
1903  * Atomically updates @v to @new with full or << 
1904  *                                            << 
1905  * Safe to use in noinstr code; prefer atomic << 
1906  *                                            << 
1907  * Return: The original value of @v.          << 
1908  */                                           << 
1909 static __always_inline int                       986 static __always_inline int
1910 raw_atomic_xchg(atomic_t *v, int new)         !! 987 arch_atomic_fetch_xor(int i, atomic_t *v)
1911 {                                                988 {
1912 #if defined(arch_atomic_xchg)                 << 
1913         return arch_atomic_xchg(v, new);      << 
1914 #elif defined(arch_atomic_xchg_relaxed)       << 
1915         int ret;                                 989         int ret;
1916         __atomic_pre_full_fence();               990         __atomic_pre_full_fence();
1917         ret = arch_atomic_xchg_relaxed(v, new !! 991         ret = arch_atomic_fetch_xor_relaxed(i, v);
1918         __atomic_post_full_fence();              992         __atomic_post_full_fence();
1919         return ret;                              993         return ret;
1920 #else                                         << 
1921         return raw_xchg(&v->counter, new);    << 
1922 #endif                                        << 
1923 }                                                994 }
                                                   >> 995 #define arch_atomic_fetch_xor arch_atomic_fetch_xor
                                                   >> 996 #endif
1924                                                  997 
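Illustrative sketch, not part of the generated header: xchg unconditionally installs @new and returns whatever was there, which suits a single-slot "mailbox" that one side fills and the other drains. example_take_pending() is a hypothetical name.

/* Atomically grab the pending value and reset the slot to 0. */
static inline int example_take_pending(atomic_t *slot)
{
	return raw_atomic_xchg(slot, 0);
}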
1925 /**                                           !! 998 #endif /* arch_atomic_fetch_xor_relaxed */
1926  * raw_atomic_xchg_acquire() - atomic exchang !! 999 
1927  * @v: pointer to atomic_t                    !! 1000 #ifndef arch_atomic_xchg_relaxed
1928  * @new: int value to assign                  !! 1001 #define arch_atomic_xchg_acquire arch_atomic_xchg
1929  *                                            !! 1002 #define arch_atomic_xchg_release arch_atomic_xchg
1930  * Atomically updates @v to @new with acquire !! 1003 #define arch_atomic_xchg_relaxed arch_atomic_xchg
1931  *                                            !! 1004 #else /* arch_atomic_xchg_relaxed */
1932  * Safe to use in noinstr code; prefer atomic !! 1005 
1933  *                                            !! 1006 #ifndef arch_atomic_xchg_acquire
1934  * Return: The original value of @v.          << 
1935  */                                           << 
1936 static __always_inline int                       1007 static __always_inline int
1937 raw_atomic_xchg_acquire(atomic_t *v, int new) !! 1008 arch_atomic_xchg_acquire(atomic_t *v, int i)
1938 {                                                1009 {
1939 #if defined(arch_atomic_xchg_acquire)         !! 1010         int ret = arch_atomic_xchg_relaxed(v, i);
1940         return arch_atomic_xchg_acquire(v, ne << 
1941 #elif defined(arch_atomic_xchg_relaxed)       << 
1942         int ret = arch_atomic_xchg_relaxed(v, << 
1943         __atomic_acquire_fence();                1011         __atomic_acquire_fence();
1944         return ret;                              1012         return ret;
1945 #elif defined(arch_atomic_xchg)               << 
1946         return arch_atomic_xchg(v, new);      << 
1947 #else                                         << 
1948         return raw_xchg_acquire(&v->counter,  << 
1949 #endif                                        << 
1950 }                                                1013 }
                                                   >> 1014 #define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
                                                   >> 1015 #endif
1951                                                  1016 
1952 /**                                           !! 1017 #ifndef arch_atomic_xchg_release
1953  * raw_atomic_xchg_release() - atomic exchang << 
1954  * @v: pointer to atomic_t                    << 
1955  * @new: int value to assign                  << 
1956  *                                            << 
1957  * Atomically updates @v to @new with release << 
1958  *                                            << 
1959  * Safe to use in noinstr code; prefer atomic << 
1960  *                                            << 
1961  * Return: The original value of @v.          << 
1962  */                                           << 
1963 static __always_inline int                       1018 static __always_inline int
1964 raw_atomic_xchg_release(atomic_t *v, int new) !! 1019 arch_atomic_xchg_release(atomic_t *v, int i)
1965 {                                                1020 {
1966 #if defined(arch_atomic_xchg_release)         << 
1967         return arch_atomic_xchg_release(v, ne << 
1968 #elif defined(arch_atomic_xchg_relaxed)       << 
1969         __atomic_release_fence();                1021         __atomic_release_fence();
1970         return arch_atomic_xchg_relaxed(v, ne !! 1022         return arch_atomic_xchg_relaxed(v, i);
1971 #elif defined(arch_atomic_xchg)               << 
1972         return arch_atomic_xchg(v, new);      << 
1973 #else                                         << 
1974         return raw_xchg_release(&v->counter,  << 
1975 #endif                                        << 
1976 }                                                1023 }
1977                                               !! 1024 #define arch_atomic_xchg_release arch_atomic_xchg_release
1978 /**                                           << 
1979  * raw_atomic_xchg_relaxed() - atomic exchang << 
1980  * @v: pointer to atomic_t                    << 
1981  * @new: int value to assign                  << 
1982  *                                            << 
1983  * Atomically updates @v to @new with relaxed << 
1984  *                                            << 
1985  * Safe to use in noinstr code; prefer atomic << 
1986  *                                            << 
1987  * Return: The original value of @v.          << 
1988  */                                           << 
1989 static __always_inline int                    << 
1990 raw_atomic_xchg_relaxed(atomic_t *v, int new) << 
1991 {                                             << 
1992 #if defined(arch_atomic_xchg_relaxed)         << 
1993         return arch_atomic_xchg_relaxed(v, ne << 
1994 #elif defined(arch_atomic_xchg)               << 
1995         return arch_atomic_xchg(v, new);      << 
1996 #else                                         << 
1997         return raw_xchg_relaxed(&v->counter,  << 
1998 #endif                                           1025 #endif
1999 }                                             << 
2000                                                  1026 
2001 /**                                           !! 1027 #ifndef arch_atomic_xchg
2002  * raw_atomic_cmpxchg() - atomic compare and  << 
2003  * @v: pointer to atomic_t                    << 
2004  * @old: int value to compare with            << 
2005  * @new: int value to assign                  << 
2006  *                                            << 
2007  * If (@v == @old), atomically updates @v to  << 
2008  * Otherwise, @v is not modified and relaxed  << 
2009  *                                            << 
2010  * Safe to use in noinstr code; prefer atomic << 
2011  *                                            << 
2012  * Return: The original value of @v.          << 
2013  */                                           << 
2014 static __always_inline int                       1028 static __always_inline int
2015 raw_atomic_cmpxchg(atomic_t *v, int old, int  !! 1029 arch_atomic_xchg(atomic_t *v, int i)
2016 {                                                1030 {
2017 #if defined(arch_atomic_cmpxchg)              << 
2018         return arch_atomic_cmpxchg(v, old, ne << 
2019 #elif defined(arch_atomic_cmpxchg_relaxed)    << 
2020         int ret;                                 1031         int ret;
2021         __atomic_pre_full_fence();               1032         __atomic_pre_full_fence();
2022         ret = arch_atomic_cmpxchg_relaxed(v,  !! 1033         ret = arch_atomic_xchg_relaxed(v, i);
2023         __atomic_post_full_fence();              1034         __atomic_post_full_fence();
2024         return ret;                              1035         return ret;
2025 #else                                         << 
2026         return raw_cmpxchg(&v->counter, old,  << 
2027 #endif                                        << 
2028 }                                                1036 }
                                                   >> 1037 #define arch_atomic_xchg arch_atomic_xchg
                                                   >> 1038 #endif
2029                                                  1039 
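Illustrative sketch, not part of the generated header: cmpxchg only stores @new when the current value equals @old, and it returns the observed value either way, so callers normally retry in a loop until their comparison value matches. The running-maximum helper below is hypothetical; it assumes raw_atomic_read() from earlier in this header.

/* Lock-free "store the largest value seen so far". */
static inline void example_track_max(atomic_t *max, int val)
{
	int old = raw_atomic_read(max);

	while (old < val) {
		int seen = raw_atomic_cmpxchg(max, old, val);

		if (seen == old)
			break;		/* we installed val */
		old = seen;		/* lost the race; re-evaluate */
	}
}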
2030 /**                                           !! 1040 #endif /* arch_atomic_xchg_relaxed */
2031  * raw_atomic_cmpxchg_acquire() - atomic comp !! 1041 
2032  * @v: pointer to atomic_t                    !! 1042 #ifndef arch_atomic_cmpxchg_relaxed
2033  * @old: int value to compare with            !! 1043 #define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg
2034  * @new: int value to assign                  !! 1044 #define arch_atomic_cmpxchg_release arch_atomic_cmpxchg
2035  *                                            !! 1045 #define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg
2036  * If (@v == @old), atomically updates @v to  !! 1046 #else /* arch_atomic_cmpxchg_relaxed */
2037  * Otherwise, @v is not modified and relaxed  !! 1047 
2038  *                                            !! 1048 #ifndef arch_atomic_cmpxchg_acquire
2039  * Safe to use in noinstr code; prefer atomic << 
2040  *                                            << 
2041  * Return: The original value of @v.          << 
2042  */                                           << 
2043 static __always_inline int                       1049 static __always_inline int
2044 raw_atomic_cmpxchg_acquire(atomic_t *v, int o !! 1050 arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
2045 {                                                1051 {
2046 #if defined(arch_atomic_cmpxchg_acquire)      << 
2047         return arch_atomic_cmpxchg_acquire(v, << 
2048 #elif defined(arch_atomic_cmpxchg_relaxed)    << 
2049         int ret = arch_atomic_cmpxchg_relaxed    1052         int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
2050         __atomic_acquire_fence();                1053         __atomic_acquire_fence();
2051         return ret;                              1054         return ret;
2052 #elif defined(arch_atomic_cmpxchg)            << 
2053         return arch_atomic_cmpxchg(v, old, ne << 
2054 #else                                         << 
2055         return raw_cmpxchg_acquire(&v->counte << 
2056 #endif                                        << 
2057 }                                                1055 }
                                                   >> 1056 #define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
                                                   >> 1057 #endif
2058                                                  1058 
2059 /**                                           !! 1059 #ifndef arch_atomic_cmpxchg_release
2060  * raw_atomic_cmpxchg_release() - atomic comp << 
2061  * @v: pointer to atomic_t                    << 
2062  * @old: int value to compare with            << 
2063  * @new: int value to assign                  << 
2064  *                                            << 
2065  * If (@v == @old), atomically updates @v to  << 
2066  * Otherwise, @v is not modified and relaxed  << 
2067  *                                            << 
2068  * Safe to use in noinstr code; prefer atomic << 
2069  *                                            << 
2070  * Return: The original value of @v.          << 
2071  */                                           << 
2072 static __always_inline int                       1060 static __always_inline int
2073 raw_atomic_cmpxchg_release(atomic_t *v, int o !! 1061 arch_atomic_cmpxchg_release(atomic_t *v, int old, int new)
2074 {                                                1062 {
2075 #if defined(arch_atomic_cmpxchg_release)      << 
2076         return arch_atomic_cmpxchg_release(v, << 
2077 #elif defined(arch_atomic_cmpxchg_relaxed)    << 
2078         __atomic_release_fence();                1063         __atomic_release_fence();
2079         return arch_atomic_cmpxchg_relaxed(v,    1064         return arch_atomic_cmpxchg_relaxed(v, old, new);
2080 #elif defined(arch_atomic_cmpxchg)            << 
2081         return arch_atomic_cmpxchg(v, old, ne << 
2082 #else                                         << 
2083         return raw_cmpxchg_release(&v->counte << 
2084 #endif                                        << 
2085 }                                                1065 }
                                                   >> 1066 #define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
                                                   >> 1067 #endif
2086                                                  1068 
2087 /**                                           !! 1069 #ifndef arch_atomic_cmpxchg
2088  * raw_atomic_cmpxchg_relaxed() - atomic comp << 
2089  * @v: pointer to atomic_t                    << 
2090  * @old: int value to compare with            << 
2091  * @new: int value to assign                  << 
2092  *                                            << 
2093  * If (@v == @old), atomically updates @v to  << 
2094  * Otherwise, @v is not modified and relaxed  << 
2095  *                                            << 
2096  * Safe to use in noinstr code; prefer atomic << 
2097  *                                            << 
2098  * Return: The original value of @v.          << 
2099  */                                           << 
2100 static __always_inline int                       1070 static __always_inline int
2101 raw_atomic_cmpxchg_relaxed(atomic_t *v, int o !! 1071 arch_atomic_cmpxchg(atomic_t *v, int old, int new)
2102 {                                                1072 {
2103 #if defined(arch_atomic_cmpxchg_relaxed)      !! 1073         int ret;
2104         return arch_atomic_cmpxchg_relaxed(v, !! 1074         __atomic_pre_full_fence();
2105 #elif defined(arch_atomic_cmpxchg)            !! 1075         ret = arch_atomic_cmpxchg_relaxed(v, old, new);
2106         return arch_atomic_cmpxchg(v, old, ne !! 1076         __atomic_post_full_fence();
2107 #else                                         !! 1077         return ret;
2108         return raw_cmpxchg_relaxed(&v->counte << 
2109 #endif                                        << 
2110 }                                                1078 }
                                                   >> 1079 #define arch_atomic_cmpxchg arch_atomic_cmpxchg
                                                   >> 1080 #endif
2111                                                  1081 
2112 /**                                           !! 1082 #endif /* arch_atomic_cmpxchg_relaxed */
2113  * raw_atomic_try_cmpxchg() - atomic compare  !! 1083 
2114  * @v: pointer to atomic_t                    !! 1084 #ifndef arch_atomic_try_cmpxchg_relaxed
2115  * @old: pointer to int value to compare with !! 1085 #ifdef arch_atomic_try_cmpxchg
2116  * @new: int value to assign                  !! 1086 #define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg
2117  *                                            !! 1087 #define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg
2118  * If (@v == @old), atomically updates @v to  !! 1088 #define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg
2119  * Otherwise, @v is not modified, @old is upd !! 1089 #endif /* arch_atomic_try_cmpxchg */
2120  * and relaxed ordering is provided.          !! 1090 
2121  *                                            !! 1091 #ifndef arch_atomic_try_cmpxchg
2122  * Safe to use in noinstr code; prefer atomic << 
2123  *                                            << 
2124  * Return: @true if the exchange occurred, @false otherwise. <<
2125  */                                           << 
2126 static __always_inline bool                      1092 static __always_inline bool
2127 raw_atomic_try_cmpxchg(atomic_t *v, int *old, !! 1093 arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
2128 {                                                1094 {
2129 #if defined(arch_atomic_try_cmpxchg)          << 
2130         return arch_atomic_try_cmpxchg(v, old << 
2131 #elif defined(arch_atomic_try_cmpxchg_relaxed << 
2132         bool ret;                             << 
2133         __atomic_pre_full_fence();            << 
2134         ret = arch_atomic_try_cmpxchg_relaxed << 
2135         __atomic_post_full_fence();           << 
2136         return ret;                           << 
2137 #else                                         << 
2138         int r, o = *old;                         1095         int r, o = *old;
2139         r = raw_atomic_cmpxchg(v, o, new);    !! 1096         r = arch_atomic_cmpxchg(v, o, new);
2140         if (unlikely(r != o))                    1097         if (unlikely(r != o))
2141                 *old = r;                        1098                 *old = r;
2142         return likely(r == o);                   1099         return likely(r == o);
2143 #endif                                        << 
2144 }                                                1100 }
                                                   >> 1101 #define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
                                                   >> 1102 #endif
2145                                                  1103 
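Illustrative sketch, not part of the generated header: try_cmpxchg folds the comparison result and the reload into one call, rewriting *@old with the current value on failure, which makes retry loops shorter than the plain cmpxchg form sketched earlier. example_track_max_try() is a hypothetical reworking of the same running-maximum idea.

/* Running maximum again, using the try_cmpxchg retry idiom. */
static inline void example_track_max_try(atomic_t *max, int val)
{
	int old = raw_atomic_read(max);

	do {
		if (old >= val)
			return;		/* nothing to do */
	} while (!raw_atomic_try_cmpxchg(max, &old, val));
}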
2146 /**                                           !! 1104 #ifndef arch_atomic_try_cmpxchg_acquire
2147  * raw_atomic_try_cmpxchg_acquire() - atomic  << 
2148  * @v: pointer to atomic_t                    << 
2149  * @old: pointer to int value to compare with << 
2150  * @new: int value to assign                  << 
2151  *                                            << 
2152  * If (@v == @old), atomically updates @v to  << 
2153  * Otherwise, @v is not modified, @old is upd << 
2154  * and relaxed ordering is provided.          << 
2155  *                                            << 
2156  * Safe to use in noinstr code; prefer atomic << 
2157  *                                            << 
2158  * Return: @true if the exchange occurred, @false otherwise. <<
2159  */                                           << 
2160 static __always_inline bool                      1105 static __always_inline bool
2161 raw_atomic_try_cmpxchg_acquire(atomic_t *v, i !! 1106 arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
2162 {                                                1107 {
2163 #if defined(arch_atomic_try_cmpxchg_acquire)  << 
2164         return arch_atomic_try_cmpxchg_acquir << 
2165 #elif defined(arch_atomic_try_cmpxchg_relaxed << 
2166         bool ret = arch_atomic_try_cmpxchg_re << 
2167         __atomic_acquire_fence();             << 
2168         return ret;                           << 
2169 #elif defined(arch_atomic_try_cmpxchg)        << 
2170         return arch_atomic_try_cmpxchg(v, old << 
2171 #else                                         << 
2172         int r, o = *old;                         1108         int r, o = *old;
2173         r = raw_atomic_cmpxchg_acquire(v, o,  !! 1109         r = arch_atomic_cmpxchg_acquire(v, o, new);
2174         if (unlikely(r != o))                    1110         if (unlikely(r != o))
2175                 *old = r;                        1111                 *old = r;
2176         return likely(r == o);                   1112         return likely(r == o);
2177 #endif                                        << 
2178 }                                                1113 }
                                                   >> 1114 #define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
                                                   >> 1115 #endif
2179                                                  1116 
2180 /**                                           !! 1117 #ifndef arch_atomic_try_cmpxchg_release
2181  * raw_atomic_try_cmpxchg_release() - atomic  << 
2182  * @v: pointer to atomic_t                    << 
2183  * @old: pointer to int value to compare with << 
2184  * @new: int value to assign                  << 
2185  *                                            << 
2186  * If (@v == @old), atomically updates @v to  << 
2187  * Otherwise, @v is not modified, @old is upd << 
2188  * and relaxed ordering is provided.          << 
2189  *                                            << 
2190  * Safe to use in noinstr code; prefer atomic << 
2191  *                                            << 
2192  * Return: @true if the exchange occurred, @false otherwise. <<
2193  */                                           << 
2194 static __always_inline bool                      1118 static __always_inline bool
2195 raw_atomic_try_cmpxchg_release(atomic_t *v, i !! 1119 arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
2196 {                                                1120 {
2197 #if defined(arch_atomic_try_cmpxchg_release)  << 
2198         return arch_atomic_try_cmpxchg_releas << 
2199 #elif defined(arch_atomic_try_cmpxchg_relaxed << 
2200         __atomic_release_fence();             << 
2201         return arch_atomic_try_cmpxchg_relaxe << 
2202 #elif defined(arch_atomic_try_cmpxchg)        << 
2203         return arch_atomic_try_cmpxchg(v, old << 
2204 #else                                         << 
2205         int r, o = *old;                         1121         int r, o = *old;
2206         r = raw_atomic_cmpxchg_release(v, o,  !! 1122         r = arch_atomic_cmpxchg_release(v, o, new);
2207         if (unlikely(r != o))                    1123         if (unlikely(r != o))
2208                 *old = r;                        1124                 *old = r;
2209         return likely(r == o);                   1125         return likely(r == o);
2210 #endif                                        << 
2211 }                                                1126 }
                                                   >> 1127 #define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
                                                   >> 1128 #endif
2212                                                  1129 
2213 /**                                           !! 1130 #ifndef arch_atomic_try_cmpxchg_relaxed
2214  * raw_atomic_try_cmpxchg_relaxed() - atomic  << 
2215  * @v: pointer to atomic_t                    << 
2216  * @old: pointer to int value to compare with << 
2217  * @new: int value to assign                  << 
2218  *                                            << 
2219  * If (@v == @old), atomically updates @v to  << 
2220  * Otherwise, @v is not modified, @old is upd << 
2221  * and relaxed ordering is provided.          << 
2222  *                                            << 
2223  * Safe to use in noinstr code; prefer atomic << 
2224  *                                            << 
2225  * Return: @true if the exchange occurred, @false otherwise. <<
2226  */                                           << 
2227 static __always_inline bool                      1131 static __always_inline bool
2228 raw_atomic_try_cmpxchg_relaxed(atomic_t *v, i !! 1132 arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
2229 {                                                1133 {
2230 #if defined(arch_atomic_try_cmpxchg_relaxed)  << 
2231         return arch_atomic_try_cmpxchg_relaxe << 
2232 #elif defined(arch_atomic_try_cmpxchg)        << 
2233         return arch_atomic_try_cmpxchg(v, old << 
2234 #else                                         << 
2235         int r, o = *old;                         1134         int r, o = *old;
2236         r = raw_atomic_cmpxchg_relaxed(v, o,  !! 1135         r = arch_atomic_cmpxchg_relaxed(v, o, new);
2237         if (unlikely(r != o))                    1136         if (unlikely(r != o))
2238                 *old = r;                        1137                 *old = r;
2239         return likely(r == o);                   1138         return likely(r == o);
                                                   >> 1139 }
                                                   >> 1140 #define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed
                                                   >> 1141 #endif
                                                   >> 1142 
                                                   >> 1143 #else /* arch_atomic_try_cmpxchg_relaxed */
                                                   >> 1144 
                                                   >> 1145 #ifndef arch_atomic_try_cmpxchg_acquire
                                                   >> 1146 static __always_inline bool
                                                   >> 1147 arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
                                                   >> 1148 {
                                                   >> 1149         bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
                                                   >> 1150         __atomic_acquire_fence();
                                                   >> 1151         return ret;
                                                   >> 1152 }
                                                   >> 1153 #define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
2240 #endif                                           1154 #endif
                                                   >> 1155 
                                                   >> 1156 #ifndef arch_atomic_try_cmpxchg_release
                                                   >> 1157 static __always_inline bool
                                                   >> 1158 arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
                                                   >> 1159 {
                                                   >> 1160         __atomic_release_fence();
                                                   >> 1161         return arch_atomic_try_cmpxchg_relaxed(v, old, new);
2241 }                                                1162 }
                                                   >> 1163 #define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
                                                   >> 1164 #endif
2242                                                  1165 
                                                   >> 1166 #ifndef arch_atomic_try_cmpxchg
                                                   >> 1167 static __always_inline bool
                                                   >> 1168 arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
                                                   >> 1169 {
                                                   >> 1170         bool ret;
                                                   >> 1171         __atomic_pre_full_fence();
                                                   >> 1172         ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
                                                   >> 1173         __atomic_post_full_fence();
                                                   >> 1174         return ret;
                                                   >> 1175 }
                                                   >> 1176 #define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
                                                   >> 1177 #endif
                                                   >> 1178 
                                                   >> 1179 #endif /* arch_atomic_try_cmpxchg_relaxed */
                                                   >> 1180 
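
As a usage sketch (not part of the generated header): the try_cmpxchg() family above is intended for read/modify/retry loops, because on failure it writes the current value of @v back through @old, so the caller does not have to re-read it. The helper below is hypothetical, and ordinary kernel code would call the instrumented atomic_try_cmpxchg() wrapper rather than the raw_ form shown here.

        #include <linux/atomic.h>
        #include <linux/limits.h>

        /* Hypothetical example: increment @v, saturating at INT_MAX. */
        static inline void example_inc_saturating(atomic_t *v)
        {
                int old = raw_atomic_read(v);

                do {
                        if (old == INT_MAX)     /* already saturated, nothing to do */
                                return;
                        /* on failure, try_cmpxchg() refreshed 'old' with the current value */
                } while (!raw_atomic_try_cmpxchg(v, &old, old + 1));
        }
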
                                                   >> 1181 #ifndef arch_atomic_sub_and_test
2243 /**                                              1182 /**
2244  * raw_atomic_sub_and_test() - atomic subtrac !! 1183  * arch_atomic_sub_and_test - subtract value from variable and test result
2245  * @i: int value to subtract                  !! 1184  * @i: integer value to subtract
2246  * @v: pointer to atomic_t                    !! 1185  * @v: pointer of type atomic_t
2247  *                                            << 
2248  * Atomically updates @v to (@v - @i) with fu << 
2249  *                                            << 
2250  * Safe to use in noinstr code; prefer atomic << 
2251  *                                               1186  *
2252  * Return: @true if the resulting value of @v !! 1187  * Atomically subtracts @i from @v and returns
                                                   >> 1188  * true if the result is zero, or false for all
                                                   >> 1189  * other cases.
2253  */                                              1190  */
2254 static __always_inline bool                      1191 static __always_inline bool
2255 raw_atomic_sub_and_test(int i, atomic_t *v)   !! 1192 arch_atomic_sub_and_test(int i, atomic_t *v)
2256 {                                                1193 {
2257 #if defined(arch_atomic_sub_and_test)         !! 1194         return arch_atomic_sub_return(i, v) == 0;
2258         return arch_atomic_sub_and_test(i, v) << 
2259 #else                                         << 
2260         return raw_atomic_sub_return(i, v) == << 
2261 #endif                                        << 
2262 }                                                1195 }
                                                   >> 1196 #define arch_atomic_sub_and_test arch_atomic_sub_and_test
                                                   >> 1197 #endif
2263                                                  1198 
                                                   >> 1199 #ifndef arch_atomic_dec_and_test
2264 /**                                              1200 /**
2265  * raw_atomic_dec_and_test() - atomic decreme !! 1201  * arch_atomic_dec_and_test - decrement and test
2266  * @v: pointer to atomic_t                    !! 1202  * @v: pointer of type atomic_t
2267  *                                            << 
2268  * Atomically updates @v to (@v - 1) with ful << 
2269  *                                            << 
2270  * Safe to use in noinstr code; prefer atomic << 
2271  *                                               1203  *
2272  * Return: @true if the resulting value of @v !! 1204  * Atomically decrements @v by 1 and
                                                   >> 1205  * returns true if the result is 0, or false for all other
                                                   >> 1206  * cases.
2273  */                                              1207  */
2274 static __always_inline bool                      1208 static __always_inline bool
2275 raw_atomic_dec_and_test(atomic_t *v)          !! 1209 arch_atomic_dec_and_test(atomic_t *v)
2276 {                                                1210 {
2277 #if defined(arch_atomic_dec_and_test)         !! 1211         return arch_atomic_dec_return(v) == 0;
2278         return arch_atomic_dec_and_test(v);   << 
2279 #else                                         << 
2280         return raw_atomic_dec_return(v) == 0; << 
2281 #endif                                        << 
2282 }                                                1212 }
                                                   >> 1213 #define arch_atomic_dec_and_test arch_atomic_dec_and_test
                                                   >> 1214 #endif
2283                                                  1215 
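
As a usage sketch (not part of the generated header): dec_and_test() is the classic last-reference test, since exactly one caller observes the transition to zero and the full ordering ensures all earlier accesses to the object are complete before it is torn down. The object type and put() helper below are hypothetical; real reference counts would normally use refcount_t, which builds on these primitives and adds sanity checking.

        #include <linux/atomic.h>
        #include <linux/slab.h>

        struct example_obj {
                atomic_t refcount;
                /* payload ... */
        };

        /* Hypothetical put(): drop one reference and free on the last one. */
        static inline void example_obj_put(struct example_obj *obj)
        {
                if (raw_atomic_dec_and_test(&obj->refcount))
                        kfree(obj);
        }
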
                                                   >> 1216 #ifndef arch_atomic_inc_and_test
2284 /**                                              1217 /**
2285  * raw_atomic_inc_and_test() - atomic increme !! 1218  * arch_atomic_inc_and_test - increment and test
2286  * @v: pointer to atomic_t                    !! 1219  * @v: pointer of type atomic_t
2287  *                                            << 
2288  * Atomically updates @v to (@v + 1) with ful << 
2289  *                                               1220  *
2290  * Safe to use in noinstr code; prefer atomic !! 1221  * Atomically increments @v by 1
2291  *                                            !! 1222  * and returns true if the result is zero, or false for all
2292  * Return: @true if the resulting value of @v !! 1223  * other cases.
2293  */                                              1224  */
2294 static __always_inline bool                      1225 static __always_inline bool
2295 raw_atomic_inc_and_test(atomic_t *v)          !! 1226 arch_atomic_inc_and_test(atomic_t *v)
2296 {                                                1227 {
2297 #if defined(arch_atomic_inc_and_test)         !! 1228         return arch_atomic_inc_return(v) == 0;
2298         return arch_atomic_inc_and_test(v);   << 
2299 #else                                         << 
2300         return raw_atomic_inc_return(v) == 0; << 
2301 #endif                                        << 
2302 }                                                1229 }
                                                   >> 1230 #define arch_atomic_inc_and_test arch_atomic_inc_and_test
                                                   >> 1231 #endif
                                                   >> 1232 
                                                   >> 1233 #ifndef arch_atomic_add_negative_relaxed
                                                   >> 1234 #ifdef arch_atomic_add_negative
                                                   >> 1235 #define arch_atomic_add_negative_acquire arch_atomic_add_negative
                                                   >> 1236 #define arch_atomic_add_negative_release arch_atomic_add_negative
                                                   >> 1237 #define arch_atomic_add_negative_relaxed arch_atomic_add_negative
                                                   >> 1238 #endif /* arch_atomic_add_negative */
2303                                                  1239 
                                                   >> 1240 #ifndef arch_atomic_add_negative
2304 /**                                              1241 /**
2305  * raw_atomic_add_negative() - atomic add and !! 1242  * arch_atomic_add_negative - Add and test if negative
2306  * @i: int value to add                       !! 1243  * @i: integer value to add
2307  * @v: pointer to atomic_t                    !! 1244  * @v: pointer of type atomic_t
2308  *                                               1245  *
2309  * Atomically updates @v to (@v + @i) with fu !! 1246  * Atomically adds @i to @v and returns true if the result is negative,
2310  *                                            !! 1247  * or false when the result is greater than or equal to zero.
2311  * Safe to use in noinstr code; prefer atomic << 
2312  *                                            << 
2313  * Return: @true if the resulting value of @v << 
2314  */                                              1248  */
2315 static __always_inline bool                      1249 static __always_inline bool
2316 raw_atomic_add_negative(int i, atomic_t *v)   !! 1250 arch_atomic_add_negative(int i, atomic_t *v)
2317 {                                                1251 {
2318 #if defined(arch_atomic_add_negative)         !! 1252         return arch_atomic_add_return(i, v) < 0;
2319         return arch_atomic_add_negative(i, v) << 
2320 #elif defined(arch_atomic_add_negative_relaxe << 
2321         bool ret;                             << 
2322         __atomic_pre_full_fence();            << 
2323         ret = arch_atomic_add_negative_relaxe << 
2324         __atomic_post_full_fence();           << 
2325         return ret;                           << 
2326 #else                                         << 
2327         return raw_atomic_add_return(i, v) <  << 
2328 #endif                                        << 
2329 }                                                1253 }
                                                   >> 1254 #define arch_atomic_add_negative arch_atomic_add_negative
                                                   >> 1255 #endif
2330                                                  1256 
                                                   >> 1257 #ifndef arch_atomic_add_negative_acquire
2331 /**                                              1258 /**
2332  * raw_atomic_add_negative_acquire() - atomic !! 1259  * arch_atomic_add_negative_acquire - Add and test if negative
2333  * @i: int value to add                       !! 1260  * @i: integer value to add
2334  * @v: pointer to atomic_t                    !! 1261  * @v: pointer of type atomic_t
2335  *                                            << 
2336  * Atomically updates @v to (@v + @i) with ac << 
2337  *                                               1262  *
2338  * Safe to use in noinstr code; prefer atomic !! 1263  * Atomically adds @i to @v and returns true if the result is negative,
2339  *                                            !! 1264  * or false when the result is greater than or equal to zero.
2340  * Return: @true if the resulting value of @v << 
2341  */                                              1265  */
2342 static __always_inline bool                      1266 static __always_inline bool
2343 raw_atomic_add_negative_acquire(int i, atomic !! 1267 arch_atomic_add_negative_acquire(int i, atomic_t *v)
2344 {                                                1268 {
2345 #if defined(arch_atomic_add_negative_acquire) !! 1269         return arch_atomic_add_return_acquire(i, v) < 0;
2346         return arch_atomic_add_negative_acqui << 
2347 #elif defined(arch_atomic_add_negative_relaxe << 
2348         bool ret = arch_atomic_add_negative_r << 
2349         __atomic_acquire_fence();             << 
2350         return ret;                           << 
2351 #elif defined(arch_atomic_add_negative)       << 
2352         return arch_atomic_add_negative(i, v) << 
2353 #else                                         << 
2354         return raw_atomic_add_return_acquire( << 
2355 #endif                                        << 
2356 }                                                1270 }
                                                   >> 1271 #define arch_atomic_add_negative_acquire arch_atomic_add_negative_acquire
                                                   >> 1272 #endif
2357                                                  1273 
                                                   >> 1274 #ifndef arch_atomic_add_negative_release
2358 /**                                              1275 /**
2359  * raw_atomic_add_negative_release() - atomic !! 1276  * arch_atomic_add_negative_release - Add and test if negative
2360  * @i: int value to add                       !! 1277  * @i: integer value to add
2361  * @v: pointer to atomic_t                    !! 1278  * @v: pointer of type atomic_t
2362  *                                               1279  *
2363  * Atomically updates @v to (@v + @i) with re !! 1280  * Atomically adds @i to @v and returns true if the result is negative,
2364  *                                            !! 1281  * or false when the result is greater than or equal to zero.
2365  * Safe to use in noinstr code; prefer atomic << 
2366  *                                            << 
2367  * Return: @true if the resulting value of @v << 
2368  */                                              1282  */
2369 static __always_inline bool                      1283 static __always_inline bool
2370 raw_atomic_add_negative_release(int i, atomic !! 1284 arch_atomic_add_negative_release(int i, atomic_t *v)
2371 {                                                1285 {
2372 #if defined(arch_atomic_add_negative_release) !! 1286         return arch_atomic_add_return_release(i, v) < 0;
2373         return arch_atomic_add_negative_relea << 
2374 #elif defined(arch_atomic_add_negative_relaxe << 
2375         __atomic_release_fence();             << 
2376         return arch_atomic_add_negative_relax << 
2377 #elif defined(arch_atomic_add_negative)       << 
2378         return arch_atomic_add_negative(i, v) << 
2379 #else                                         << 
2380         return raw_atomic_add_return_release( << 
2381 #endif                                        << 
2382 }                                                1287 }
                                                   >> 1288 #define arch_atomic_add_negative_release arch_atomic_add_negative_release
                                                   >> 1289 #endif
2383                                                  1290 
                                                   >> 1291 #ifndef arch_atomic_add_negative_relaxed
2384 /**                                              1292 /**
2385  * raw_atomic_add_negative_relaxed() - atomic !! 1293  * arch_atomic_add_negative_relaxed - Add and test if negative
2386  * @i: int value to add                       !! 1294  * @i: integer value to add
2387  * @v: pointer to atomic_t                    !! 1295  * @v: pointer of type atomic_t
2388  *                                            << 
2389  * Atomically updates @v to (@v + @i) with re << 
2390  *                                               1296  *
2391  * Safe to use in noinstr code; prefer atomic !! 1297  * Atomically adds @i to @v and returns true if the result is negative,
2392  *                                            !! 1298  * or false when the result is greater than or equal to zero.
2393  * Return: @true if the resulting value of @v << 
2394  */                                              1299  */
2395 static __always_inline bool                      1300 static __always_inline bool
2396 raw_atomic_add_negative_relaxed(int i, atomic !! 1301 arch_atomic_add_negative_relaxed(int i, atomic_t *v)
                                                   >> 1302 {
                                                   >> 1303         return arch_atomic_add_return_relaxed(i, v) < 0;
                                                   >> 1304 }
                                                   >> 1305 #define arch_atomic_add_negative_relaxed arch_atomic_add_negative_relaxed
                                                   >> 1306 #endif
                                                   >> 1307 
                                                   >> 1308 #else /* arch_atomic_add_negative_relaxed */
                                                   >> 1309 
                                                   >> 1310 #ifndef arch_atomic_add_negative_acquire
                                                   >> 1311 static __always_inline bool
                                                   >> 1312 arch_atomic_add_negative_acquire(int i, atomic_t *v)
2397 {                                                1313 {
2398 #if defined(arch_atomic_add_negative_relaxed) !! 1314         bool ret = arch_atomic_add_negative_relaxed(i, v);
                                                   >> 1315         __atomic_acquire_fence();
                                                   >> 1316         return ret;
                                                   >> 1317 }
                                                   >> 1318 #define arch_atomic_add_negative_acquire arch_atomic_add_negative_acquire
                                                   >> 1319 #endif
                                                   >> 1320 
                                                   >> 1321 #ifndef arch_atomic_add_negative_release
                                                   >> 1322 static __always_inline bool
                                                   >> 1323 arch_atomic_add_negative_release(int i, atomic_t *v)
                                                   >> 1324 {
                                                   >> 1325         __atomic_release_fence();
2399         return arch_atomic_add_negative_relax    1326         return arch_atomic_add_negative_relaxed(i, v);
2400 #elif defined(arch_atomic_add_negative)       !! 1327 }
2401         return arch_atomic_add_negative(i, v) !! 1328 #define arch_atomic_add_negative_release arch_atomic_add_negative_release
2402 #else                                         << 
2403         return raw_atomic_add_return_relaxed( << 
2404 #endif                                           1329 #endif
                                                   >> 1330 
                                                   >> 1331 #ifndef arch_atomic_add_negative
                                                   >> 1332 static __always_inline bool
                                                   >> 1333 arch_atomic_add_negative(int i, atomic_t *v)
                                                   >> 1334 {
                                                   >> 1335         bool ret;
                                                   >> 1336         __atomic_pre_full_fence();
                                                   >> 1337         ret = arch_atomic_add_negative_relaxed(i, v);
                                                   >> 1338         __atomic_post_full_fence();
                                                   >> 1339         return ret;
2405 }                                                1340 }
                                                   >> 1341 #define arch_atomic_add_negative arch_atomic_add_negative
                                                   >> 1342 #endif
                                                   >> 1343 
                                                   >> 1344 #endif /* arch_atomic_add_negative_relaxed */
2406                                                  1345 
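
As a usage sketch (not part of the generated header): add_negative() reports the sign of the updated value, which suits credit or budget style counters where dropping below zero is the interesting event. The charge() helper below is hypothetical.

        #include <linux/atomic.h>

        /* Hypothetical: consume @cost credits; true means the budget is now overdrawn. */
        static inline bool example_charge(atomic_t *budget, int cost)
        {
                return raw_atomic_add_negative(cost, budget);
        }
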
                                                   >> 1346 #ifndef arch_atomic_fetch_add_unless
2407 /**                                              1347 /**
2408  * raw_atomic_fetch_add_unless() - atomic add !! 1348  * arch_atomic_fetch_add_unless - add unless the number is already a given value
2409  * @v: pointer to atomic_t                    !! 1349  * @v: pointer of type atomic_t
2410  * @a: int value to add                       !! 1350  * @a: the amount to add to v...
2411  * @u: int value to compare with              !! 1351  * @u: ...unless v is equal to u.
2412  *                                               1352  *
2413  * If (@v != @u), atomically updates @v to (@ !! 1353  * Atomically adds @a to @v, so long as @v was not already @u.
2414  * Otherwise, @v is not modified and relaxed  !! 1354  * Returns original value of @v
2415  *                                            << 
2416  * Safe to use in noinstr code; prefer atomic << 
2417  *                                            << 
2418  * Return: The original value of @v.          << 
2419  */                                              1355  */
2420 static __always_inline int                       1356 static __always_inline int
2421 raw_atomic_fetch_add_unless(atomic_t *v, int  !! 1357 arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
2422 {                                                1358 {
2423 #if defined(arch_atomic_fetch_add_unless)     !! 1359         int c = arch_atomic_read(v);
2424         return arch_atomic_fetch_add_unless(v << 
2425 #else                                         << 
2426         int c = raw_atomic_read(v);           << 
2427                                                  1360 
2428         do {                                     1361         do {
2429                 if (unlikely(c == u))            1362                 if (unlikely(c == u))
2430                         break;                   1363                         break;
2431         } while (!raw_atomic_try_cmpxchg(v, & !! 1364         } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
2432                                                  1365 
2433         return c;                                1366         return c;
2434 #endif                                        << 
2435 }                                                1367 }
                                                   >> 1368 #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
                                                   >> 1369 #endif
2436                                                  1370 
                                                   >> 1371 #ifndef arch_atomic_add_unless
2437 /**                                              1372 /**
2438  * raw_atomic_add_unless() - atomic add unles !! 1373  * arch_atomic_add_unless - add unless the number is already a given value
2439  * @v: pointer to atomic_t                    !! 1374  * @v: pointer of type atomic_t
2440  * @a: int value to add                       !! 1375  * @a: the amount to add to v...
2441  * @u: int value to compare with              !! 1376  * @u: ...unless v is equal to u.
2442  *                                               1377  *
2443  * If (@v != @u), atomically updates @v to (@ !! 1378  * Atomically adds @a to @v, if @v was not already @u.
2444  * Otherwise, @v is not modified and relaxed  !! 1379  * Returns true if the addition was done.
2445  *                                            << 
2446  * Safe to use in noinstr code; prefer atomic << 
2447  *                                            << 
2448  * Return: @true if @v was updated, @false ot << 
2449  */                                              1380  */
2450 static __always_inline bool                      1381 static __always_inline bool
2451 raw_atomic_add_unless(atomic_t *v, int a, int !! 1382 arch_atomic_add_unless(atomic_t *v, int a, int u)
2452 {                                                1383 {
2453 #if defined(arch_atomic_add_unless)           !! 1384         return arch_atomic_fetch_add_unless(v, a, u) != u;
2454         return arch_atomic_add_unless(v, a, u << 
2455 #else                                         << 
2456         return raw_atomic_fetch_add_unless(v, << 
2457 #endif                                        << 
2458 }                                                1385 }
                                                   >> 1386 #define arch_atomic_add_unless arch_atomic_add_unless
                                                   >> 1387 #endif
2459                                                  1388 
                                                   >> 1389 #ifndef arch_atomic_inc_not_zero
2460 /**                                              1390 /**
2461  * raw_atomic_inc_not_zero() - atomic increme !! 1391  * arch_atomic_inc_not_zero - increment unless the number is zero
2462  * @v: pointer to atomic_t                    !! 1392  * @v: pointer of type atomic_t
2463  *                                            << 
2464  * If (@v != 0), atomically updates @v to (@v << 
2465  * Otherwise, @v is not modified and relaxed  << 
2466  *                                               1393  *
2467  * Safe to use in noinstr code; prefer atomic !! 1394  * Atomically increments @v by 1, if @v is non-zero.
2468  *                                            !! 1395  * Returns true if the increment was done.
2469  * Return: @true if @v was updated, @false ot << 
2470  */                                              1396  */
2471 static __always_inline bool                      1397 static __always_inline bool
2472 raw_atomic_inc_not_zero(atomic_t *v)          !! 1398 arch_atomic_inc_not_zero(atomic_t *v)
2473 {                                                1399 {
2474 #if defined(arch_atomic_inc_not_zero)         !! 1400         return arch_atomic_add_unless(v, 1, 0);
2475         return arch_atomic_inc_not_zero(v);   << 
2476 #else                                         << 
2477         return raw_atomic_add_unless(v, 1, 0) << 
2478 #endif                                        << 
2479 }                                                1401 }
                                                   >> 1402 #define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
                                                   >> 1403 #endif
2480                                                  1404 
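
As a usage sketch (not part of the generated header): inc_not_zero() is the usual "take a reference only if the object is still live" helper for lockless lookups, refusing to resurrect an object whose refcount already reached zero. The type and tryget() helper below are hypothetical.

        #include <linux/atomic.h>

        struct example_node {
                atomic_t refcount;
        };

        /* Hypothetical tryget(): false means the node is already being freed. */
        static inline bool example_node_tryget(struct example_node *node)
        {
                return raw_atomic_inc_not_zero(&node->refcount);
        }
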
2481 /**                                           !! 1405 #ifndef arch_atomic_inc_unless_negative
2482  * raw_atomic_inc_unless_negative() - atomic  << 
2483  * @v: pointer to atomic_t                    << 
2484  *                                            << 
2485  * If (@v >= 0), atomically updates @v to (@v << 
2486  * Otherwise, @v is not modified and relaxed  << 
2487  *                                            << 
2488  * Safe to use in noinstr code; prefer atomic << 
2489  *                                            << 
2490  * Return: @true if @v was updated, @false ot << 
2491  */                                           << 
2492 static __always_inline bool                      1406 static __always_inline bool
2493 raw_atomic_inc_unless_negative(atomic_t *v)   !! 1407 arch_atomic_inc_unless_negative(atomic_t *v)
2494 {                                                1408 {
2495 #if defined(arch_atomic_inc_unless_negative)  !! 1409         int c = arch_atomic_read(v);
2496         return arch_atomic_inc_unless_negativ << 
2497 #else                                         << 
2498         int c = raw_atomic_read(v);           << 
2499                                                  1410 
2500         do {                                     1411         do {
2501                 if (unlikely(c < 0))             1412                 if (unlikely(c < 0))
2502                         return false;            1413                         return false;
2503         } while (!raw_atomic_try_cmpxchg(v, & !! 1414         } while (!arch_atomic_try_cmpxchg(v, &c, c + 1));
2504                                                  1415 
2505         return true;                             1416         return true;
2506 #endif                                        << 
2507 }                                                1417 }
                                                   >> 1418 #define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
                                                   >> 1419 #endif
2508                                                  1420 
2509 /**                                           !! 1421 #ifndef arch_atomic_dec_unless_positive
2510  * raw_atomic_dec_unless_positive() - atomic  << 
2511  * @v: pointer to atomic_t                    << 
2512  *                                            << 
2513  * If (@v <= 0), atomically updates @v to (@v << 
2514  * Otherwise, @v is not modified and relaxed  << 
2515  *                                            << 
2516  * Safe to use in noinstr code; prefer atomic << 
2517  *                                            << 
2518  * Return: @true if @v was updated, @false ot << 
2519  */                                           << 
2520 static __always_inline bool                      1422 static __always_inline bool
2521 raw_atomic_dec_unless_positive(atomic_t *v)   !! 1423 arch_atomic_dec_unless_positive(atomic_t *v)
2522 {                                                1424 {
2523 #if defined(arch_atomic_dec_unless_positive)  !! 1425         int c = arch_atomic_read(v);
2524         return arch_atomic_dec_unless_positiv << 
2525 #else                                         << 
2526         int c = raw_atomic_read(v);           << 
2527                                                  1426 
2528         do {                                     1427         do {
2529                 if (unlikely(c > 0))             1428                 if (unlikely(c > 0))
2530                         return false;            1429                         return false;
2531         } while (!raw_atomic_try_cmpxchg(v, & !! 1430         } while (!arch_atomic_try_cmpxchg(v, &c, c - 1));
2532                                                  1431 
2533         return true;                             1432         return true;
2534 #endif                                        << 
2535 }                                                1433 }
                                                   >> 1434 #define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
                                                   >> 1435 #endif
2536                                                  1436 
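
As a usage sketch (not part of the generated header): the *_unless_* helpers fit a simple gate pattern in which a non-negative counter tracks active users and a negative value means the gate has been closed to new entries. The gate helpers below are hypothetical.

        #include <linux/atomic.h>

        /* Hypothetical gate: >= 0 counts users, < 0 means closed to new entries. */
        static inline bool example_gate_enter(atomic_t *gate)
        {
                return raw_atomic_inc_unless_negative(gate);    /* false if closed */
        }

        static inline void example_gate_exit(atomic_t *gate)
        {
                raw_atomic_dec(gate);
        }
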
2537 /**                                           !! 1437 #ifndef arch_atomic_dec_if_positive
2538  * raw_atomic_dec_if_positive() - atomic decr << 
2539  * @v: pointer to atomic_t                    << 
2540  *                                            << 
2541  * If (@v > 0), atomically updates @v to (@v  << 
2542  * Otherwise, @v is not modified and relaxed  << 
2543  *                                            << 
2544  * Safe to use in noinstr code; prefer atomic << 
2545  *                                            << 
2546  * Return: The old value of (@v - 1), regardl << 
2547  */                                           << 
2548 static __always_inline int                       1438 static __always_inline int
2549 raw_atomic_dec_if_positive(atomic_t *v)       !! 1439 arch_atomic_dec_if_positive(atomic_t *v)
2550 {                                                1440 {
2551 #if defined(arch_atomic_dec_if_positive)      !! 1441         int dec, c = arch_atomic_read(v);
2552         return arch_atomic_dec_if_positive(v) << 
2553 #else                                         << 
2554         int dec, c = raw_atomic_read(v);      << 
2555                                                  1442 
2556         do {                                     1443         do {
2557                 dec = c - 1;                     1444                 dec = c - 1;
2558                 if (unlikely(dec < 0))           1445                 if (unlikely(dec < 0))
2559                         break;                   1446                         break;
2560         } while (!raw_atomic_try_cmpxchg(v, & !! 1447         } while (!arch_atomic_try_cmpxchg(v, &c, dec));
2561                                                  1448 
2562         return dec;                              1449         return dec;
2563 #endif                                        << 
2564 }                                                1450 }
                                                   >> 1451 #define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
                                                   >> 1452 #endif
2565                                                  1453 
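
As a usage sketch (not part of the generated header): dec_if_positive() only performs the decrement when the result would stay non-negative, which makes it a natural way to consume tokens without letting the count go below zero. The token helper below is hypothetical.

        #include <linux/atomic.h>

        /* Hypothetical: take one token from @pool; false means none were available. */
        static inline bool example_take_token(atomic_t *pool)
        {
                return raw_atomic_dec_if_positive(pool) >= 0;
        }
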
2566 #ifdef CONFIG_GENERIC_ATOMIC64                   1454 #ifdef CONFIG_GENERIC_ATOMIC64
2567 #include <asm-generic/atomic64.h>                1455 #include <asm-generic/atomic64.h>
2568 #endif                                           1456 #endif
2569                                                  1457 
2570 /**                                           !! 1458 #ifndef arch_atomic64_read_acquire
2571  * raw_atomic64_read() - atomic load with rel << 
2572  * @v: pointer to atomic64_t                  << 
2573  *                                            << 
2574  * Atomically loads the value of @v with rela << 
2575  *                                            << 
2576  * Safe to use in noinstr code; prefer atomic << 
2577  *                                            << 
2578  * Return: The value loaded from @v.          << 
2579  */                                           << 
2580 static __always_inline s64                       1459 static __always_inline s64
2581 raw_atomic64_read(const atomic64_t *v)        !! 1460 arch_atomic64_read_acquire(const atomic64_t *v)
2582 {                                                1461 {
2583         return arch_atomic64_read(v);         << 
2584 }                                             << 
2585                                               << 
2586 /**                                           << 
2587  * raw_atomic64_read_acquire() - atomic load  << 
2588  * @v: pointer to atomic64_t                  << 
2589  *                                            << 
2590  * Atomically loads the value of @v with acqu << 
2591  *                                            << 
2592  * Safe to use in noinstr code; prefer atomic << 
2593  *                                            << 
2594  * Return: The value loaded from @v.          << 
2595  */                                           << 
2596 static __always_inline s64                    << 
2597 raw_atomic64_read_acquire(const atomic64_t *v << 
2598 {                                             << 
2599 #if defined(arch_atomic64_read_acquire)       << 
2600         return arch_atomic64_read_acquire(v); << 
2601 #else                                         << 
2602         s64 ret;                                 1462         s64 ret;
2603                                                  1463 
2604         if (__native_word(atomic64_t)) {         1464         if (__native_word(atomic64_t)) {
2605                 ret = smp_load_acquire(&(v)->    1465                 ret = smp_load_acquire(&(v)->counter);
2606         } else {                                 1466         } else {
2607                 ret = raw_atomic64_read(v);   !! 1467                 ret = arch_atomic64_read(v);
2608                 __atomic_acquire_fence();        1468                 __atomic_acquire_fence();
2609         }                                        1469         }
2610                                                  1470 
2611         return ret;                              1471         return ret;
2612 #endif                                        << 
2613 }                                                1472 }
                                                   >> 1473 #define arch_atomic64_read_acquire arch_atomic64_read_acquire
                                                   >> 1474 #endif
2614                                                  1475 
2615 /**                                           !! 1476 #ifndef arch_atomic64_set_release
2616  * raw_atomic64_set() - atomic set with relax << 
2617  * @v: pointer to atomic64_t                  << 
2618  * @i: s64 value to assign                    << 
2619  *                                            << 
2620  * Atomically sets @v to @i with relaxed orde << 
2621  *                                            << 
2622  * Safe to use in noinstr code; prefer atomic << 
2623  *                                            << 
2624  * Return: Nothing.                           << 
2625  */                                           << 
2626 static __always_inline void                      1477 static __always_inline void
2627 raw_atomic64_set(atomic64_t *v, s64 i)        !! 1478 arch_atomic64_set_release(atomic64_t *v, s64 i)
2628 {                                                1479 {
2629         arch_atomic64_set(v, i);              << 
2630 }                                             << 
2631                                               << 
2632 /**                                           << 
2633  * raw_atomic64_set_release() - atomic set wi << 
2634  * @v: pointer to atomic64_t                  << 
2635  * @i: s64 value to assign                    << 
2636  *                                            << 
2637  * Atomically sets @v to @i with release orde << 
2638  *                                            << 
2639  * Safe to use in noinstr code; prefer atomic << 
2640  *                                            << 
2641  * Return: Nothing.                           << 
2642  */                                           << 
2643 static __always_inline void                   << 
2644 raw_atomic64_set_release(atomic64_t *v, s64 i << 
2645 {                                             << 
2646 #if defined(arch_atomic64_set_release)        << 
2647         arch_atomic64_set_release(v, i);      << 
2648 #else                                         << 
2649         if (__native_word(atomic64_t)) {         1480         if (__native_word(atomic64_t)) {
2650                 smp_store_release(&(v)->count    1481                 smp_store_release(&(v)->counter, i);
2651         } else {                                 1482         } else {
2652                 __atomic_release_fence();        1483                 __atomic_release_fence();
2653                 raw_atomic64_set(v, i);       !! 1484                 arch_atomic64_set(v, i);
2654         }                                        1485         }
                                                   >> 1486 }
                                                   >> 1487 #define arch_atomic64_set_release arch_atomic64_set_release
2655 #endif                                           1488 #endif
                                                   >> 1489 
                                                   >> 1490 #ifndef arch_atomic64_add_return_relaxed
                                                   >> 1491 #define arch_atomic64_add_return_acquire arch_atomic64_add_return
                                                   >> 1492 #define arch_atomic64_add_return_release arch_atomic64_add_return
                                                   >> 1493 #define arch_atomic64_add_return_relaxed arch_atomic64_add_return
                                                   >> 1494 #else /* arch_atomic64_add_return_relaxed */
                                                   >> 1495 
                                                   >> 1496 #ifndef arch_atomic64_add_return_acquire
                                                   >> 1497 static __always_inline s64
                                                   >> 1498 arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
                                                   >> 1499 {
                                                   >> 1500         s64 ret = arch_atomic64_add_return_relaxed(i, v);
                                                   >> 1501         __atomic_acquire_fence();
                                                   >> 1502         return ret;
2656 }                                                1503 }
                                                   >> 1504 #define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
                                                   >> 1505 #endif
2657                                                  1506 
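
As a usage sketch (not part of the generated header): read_acquire() and set_release() pair up like smp_load_acquire()/smp_store_release(): a reader that observes the released value also observes every store the writer made before it. The sequence variable and helpers below are hypothetical.

        #include <linux/atomic.h>
        #include <linux/types.h>

        static atomic64_t example_seq = ATOMIC64_INIT(0);

        /* Writer: all stores made before this call are visible to any reader
         * that subsequently observes the new sequence number. */
        static inline void example_publish(s64 next)
        {
                raw_atomic64_set_release(&example_seq, next);
        }

        /* Reader: acquire ordering makes the writer's earlier stores visible. */
        static inline s64 example_snapshot(void)
        {
                return raw_atomic64_read_acquire(&example_seq);
        }
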
2658 /**                                           !! 1507 #ifndef arch_atomic64_add_return_release
2659  * raw_atomic64_add() - atomic add with relax !! 1508 static __always_inline s64
2660  * @i: s64 value to add                       !! 1509 arch_atomic64_add_return_release(s64 i, atomic64_t *v)
2661  * @v: pointer to atomic64_t                  << 
2662  *                                            << 
2663  * Atomically updates @v to (@v + @i) with re << 
2664  *                                            << 
2665  * Safe to use in noinstr code; prefer atomic << 
2666  *                                            << 
2667  * Return: Nothing.                           << 
2668  */                                           << 
2669 static __always_inline void                   << 
2670 raw_atomic64_add(s64 i, atomic64_t *v)        << 
2671 {                                                1510 {
2672         arch_atomic64_add(i, v);              !! 1511         __atomic_release_fence();
                                                   >> 1512         return arch_atomic64_add_return_relaxed(i, v);
2673 }                                                1513 }
                                                   >> 1514 #define arch_atomic64_add_return_release arch_atomic64_add_return_release
                                                   >> 1515 #endif
2674                                                  1516 
2675 /**                                           !! 1517 #ifndef arch_atomic64_add_return
2676  * raw_atomic64_add_return() - atomic add wit << 
2677  * @i: s64 value to add                       << 
2678  * @v: pointer to atomic64_t                  << 
2679  *                                            << 
2680  * Atomically updates @v to (@v + @i) with fu << 
2681  *                                            << 
2682  * Safe to use in noinstr code; prefer atomic << 
2683  *                                            << 
2684  * Return: The updated value of @v.           << 
2685  */                                           << 
2686 static __always_inline s64                       1518 static __always_inline s64
2687 raw_atomic64_add_return(s64 i, atomic64_t *v) !! 1519 arch_atomic64_add_return(s64 i, atomic64_t *v)
2688 {                                                1520 {
2689 #if defined(arch_atomic64_add_return)         << 
2690         return arch_atomic64_add_return(i, v) << 
2691 #elif defined(arch_atomic64_add_return_relaxe << 
2692         s64 ret;                                 1521         s64 ret;
2693         __atomic_pre_full_fence();               1522         __atomic_pre_full_fence();
2694         ret = arch_atomic64_add_return_relaxe    1523         ret = arch_atomic64_add_return_relaxed(i, v);
2695         __atomic_post_full_fence();              1524         __atomic_post_full_fence();
2696         return ret;                              1525         return ret;
2697 #else                                         << 
2698 #error "Unable to define raw_atomic64_add_ret << 
2699 #endif                                        << 
2700 }                                                1526 }
                                                   >> 1527 #define arch_atomic64_add_return arch_atomic64_add_return
                                                   >> 1528 #endif
2701                                                  1529 
2702 /**                                           !! 1530 #endif /* arch_atomic64_add_return_relaxed */
2703  * raw_atomic64_add_return_acquire() - atomic !! 1531 
2704  * @i: s64 value to add                       !! 1532 #ifndef arch_atomic64_fetch_add_relaxed
2705  * @v: pointer to atomic64_t                  !! 1533 #define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
2706  *                                            !! 1534 #define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
2707  * Atomically updates @v to (@v + @i) with ac !! 1535 #define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add
2708  *                                            !! 1536 #else /* arch_atomic64_fetch_add_relaxed */
2709  * Safe to use in noinstr code; prefer atomic !! 1537 
2710  *                                            !! 1538 #ifndef arch_atomic64_fetch_add_acquire
2711  * Return: The updated value of @v.           << 
2712  */                                           << 
2713 static __always_inline s64                       1539 static __always_inline s64
2714 raw_atomic64_add_return_acquire(s64 i, atomic !! 1540 arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
2715 {                                                1541 {
2716 #if defined(arch_atomic64_add_return_acquire) !! 1542         s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
2717         return arch_atomic64_add_return_acqui << 
2718 #elif defined(arch_atomic64_add_return_relaxe << 
2719         s64 ret = arch_atomic64_add_return_re << 
2720         __atomic_acquire_fence();                1543         __atomic_acquire_fence();
2721         return ret;                              1544         return ret;
2722 #elif defined(arch_atomic64_add_return)       << 
2723         return arch_atomic64_add_return(i, v) << 
2724 #else                                         << 
2725 #error "Unable to define raw_atomic64_add_ret << 
2726 #endif                                        << 
2727 }                                                1545 }
                                                   >> 1546 #define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
                                                   >> 1547 #endif
2728                                                  1548 
2729 /**                                           !! 1549 #ifndef arch_atomic64_fetch_add_release
2730  * raw_atomic64_add_return_release() - atomic << 
2731  * @i: s64 value to add                       << 
2732  * @v: pointer to atomic64_t                  << 
2733  *                                            << 
2734  * Atomically updates @v to (@v + @i) with re << 
2735  *                                            << 
2736  * Safe to use in noinstr code; prefer atomic << 
2737  *                                            << 
2738  * Return: The updated value of @v.           << 
2739  */                                           << 
2740 static __always_inline s64                       1550 static __always_inline s64
2741 raw_atomic64_add_return_release(s64 i, atomic !! 1551 arch_atomic64_fetch_add_release(s64 i, atomic64_t *v)
2742 {                                                1552 {
2743 #if defined(arch_atomic64_add_return_release) << 
2744         return arch_atomic64_add_return_relea << 
2745 #elif defined(arch_atomic64_add_return_relaxe << 
2746         __atomic_release_fence();                1553         __atomic_release_fence();
2747         return arch_atomic64_add_return_relax !! 1554         return arch_atomic64_fetch_add_relaxed(i, v);
2748 #elif defined(arch_atomic64_add_return)       << 
2749         return arch_atomic64_add_return(i, v) << 
2750 #else                                         << 
2751 #error "Unable to define raw_atomic64_add_ret << 
2752 #endif                                        << 
2753 }                                                1555 }
2754                                               !! 1556 #define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
2755 /**                                           << 
2756  * raw_atomic64_add_return_relaxed() - atomic << 
2757  * @i: s64 value to add                       << 
2758  * @v: pointer to atomic64_t                  << 
2759  *                                            << 
2760  * Atomically updates @v to (@v + @i) with re << 
2761  *                                            << 
2762  * Safe to use in noinstr code; prefer atomic << 
2763  *                                            << 
2764  * Return: The updated value of @v.           << 
2765  */                                           << 
2766 static __always_inline s64                    << 
2767 raw_atomic64_add_return_relaxed(s64 i, atomic << 
2768 {                                             << 
2769 #if defined(arch_atomic64_add_return_relaxed) << 
2770         return arch_atomic64_add_return_relax << 
2771 #elif defined(arch_atomic64_add_return)       << 
2772         return arch_atomic64_add_return(i, v) << 
2773 #else                                         << 
2774 #error "Unable to define raw_atomic64_add_ret << 
2775 #endif                                           1557 #endif
2776 }                                             << 
2777                                                  1558 
2778 /**                                           !! 1559 #ifndef arch_atomic64_fetch_add
2779  * raw_atomic64_fetch_add() - atomic add with << 
2780  * @i: s64 value to add                       << 
2781  * @v: pointer to atomic64_t                  << 
2782  *                                            << 
2783  * Atomically updates @v to (@v + @i) with fu << 
2784  *                                            << 
2785  * Safe to use in noinstr code; prefer atomic << 
2786  *                                            << 
2787  * Return: The original value of @v.          << 
2788  */                                           << 
2789 static __always_inline s64                       1560 static __always_inline s64
2790 raw_atomic64_fetch_add(s64 i, atomic64_t *v)  !! 1561 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
2791 {                                                1562 {
2792 #if defined(arch_atomic64_fetch_add)          << 
2793         return arch_atomic64_fetch_add(i, v); << 
2794 #elif defined(arch_atomic64_fetch_add_relaxed << 
2795         s64 ret;                                 1563         s64 ret;
2796         __atomic_pre_full_fence();               1564         __atomic_pre_full_fence();
2797         ret = arch_atomic64_fetch_add_relaxed    1565         ret = arch_atomic64_fetch_add_relaxed(i, v);
2798         __atomic_post_full_fence();              1566         __atomic_post_full_fence();
2799         return ret;                              1567         return ret;
2800 #else                                         << 
2801 #error "Unable to define raw_atomic64_fetch_a << 
2802 #endif                                        << 
2803 }                                                1568 }
                                                   >> 1569 #define arch_atomic64_fetch_add arch_atomic64_fetch_add
                                                   >> 1570 #endif
2804                                                  1571 
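
As a usage sketch (not part of the generated header): fetch_add() returns the value held before the addition, which is exactly what is needed to hand out unique, monotonically increasing identifiers. The counter below is hypothetical, and the full ordering of the plain fetch_add() form is stronger than an id allocator strictly needs.

        #include <linux/atomic.h>
        #include <linux/types.h>

        static atomic64_t example_next_id = ATOMIC64_INIT(0);

        /* Hypothetical: return a fresh 64-bit id on every call. */
        static inline u64 example_alloc_id(void)
        {
                return raw_atomic64_fetch_add(1, &example_next_id);
        }
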
2805 /**                                           !! 1572 #endif /* arch_atomic64_fetch_add_relaxed */
2806  * raw_atomic64_fetch_add_acquire() - atomic  !! 1573 
2807  * @i: s64 value to add                       !! 1574 #ifndef arch_atomic64_sub_return_relaxed
2808  * @v: pointer to atomic64_t                  !! 1575 #define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
2809  *                                            !! 1576 #define arch_atomic64_sub_return_release arch_atomic64_sub_return
2810  * Atomically updates @v to (@v + @i) with ac !! 1577 #define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return
2811  *                                            !! 1578 #else /* arch_atomic64_sub_return_relaxed */
2812  * Safe to use in noinstr code; prefer atomic !! 1579 
2813  *                                            !! 1580 #ifndef arch_atomic64_sub_return_acquire
2814  * Return: The original value of @v.          << 
2815  */                                           << 
2816 static __always_inline s64                       1581 static __always_inline s64
2817 raw_atomic64_fetch_add_acquire(s64 i, atomic6 !! 1582 arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
2818 {                                                1583 {
2819 #if defined(arch_atomic64_fetch_add_acquire)  !! 1584         s64 ret = arch_atomic64_sub_return_relaxed(i, v);
2820         return arch_atomic64_fetch_add_acquir << 
2821 #elif defined(arch_atomic64_fetch_add_relaxed << 
2822         s64 ret = arch_atomic64_fetch_add_rel << 
2823         __atomic_acquire_fence();                1585         __atomic_acquire_fence();
2824         return ret;                              1586         return ret;
2825 #elif defined(arch_atomic64_fetch_add)        << 
2826         return arch_atomic64_fetch_add(i, v); << 
2827 #else                                         << 
2828 #error "Unable to define raw_atomic64_fetch_add_acquire" <<
2829 #endif                                        << 
2830 }                                                1587 }
                                                   >> 1588 #define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
                                                   >> 1589 #endif
2831                                                  1590 
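
The acquire fallback follows the same recipe with a one-sided fence: the relaxed RMW runs first, then an acquire fence, so accesses issued after the call are not reordered before it. A minimal C11 analogue, again illustrative rather than the kernel's code:

#include <stdatomic.h>
#include <stdint.h>

/* Acquire-ordered fetch-add: relaxed RMW followed by an acquire
 * fence, mirroring the __atomic_acquire_fence() branch above. */
static inline int64_t fetch_add_acquire_sketch(_Atomic int64_t *v, int64_t i)
{
        int64_t ret = atomic_fetch_add_explicit(v, i, memory_order_relaxed);

        atomic_thread_fence(memory_order_acquire);
        return ret;
}
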
2832 /**                                           !! 1591 #ifndef arch_atomic64_sub_return_release
2833  * raw_atomic64_fetch_add_release() - atomic add with release ordering <<
2834  * @i: s64 value to add                       << 
2835  * @v: pointer to atomic64_t                  << 
2836  *                                            << 
2837  * Atomically updates @v to (@v + @i) with release ordering. <<
2838  *                                            << 
2839  * Safe to use in noinstr code; prefer atomic64_fetch_add_release() elsewhere. <<
2840  *                                            << 
2841  * Return: The original value of @v.          << 
2842  */                                           << 
2843 static __always_inline s64                       1592 static __always_inline s64
2844 raw_atomic64_fetch_add_release(s64 i, atomic64_t *v) !! 1593 arch_atomic64_sub_return_release(s64 i, atomic64_t *v)
2845 {                                                1594 {
2846 #if defined(arch_atomic64_fetch_add_release)  << 
2847         return arch_atomic64_fetch_add_release(i, v); <<
2848 #elif defined(arch_atomic64_fetch_add_relaxed) <<
2849         __atomic_release_fence();                1595         __atomic_release_fence();
2850         return arch_atomic64_fetch_add_relaxed(i, v); !! 1596         return arch_atomic64_sub_return_relaxed(i, v);
2851 #elif defined(arch_atomic64_fetch_add)        << 
2852         return arch_atomic64_fetch_add(i, v); << 
2853 #else                                         << 
2854 #error "Unable to define raw_atomic64_fetch_add_release" <<
2855 #endif                                        << 
2856 }                                                1597 }
2857                                               !! 1598 #define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
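
Release is the mirror image: the fence is issued before the relaxed RMW, so accesses issued before the call are not reordered past it. A C11 sketch of the same shape, not the kernel's implementation:

#include <stdatomic.h>
#include <stdint.h>

/* Release-ordered fetch-add: release fence first, then the relaxed
 * RMW, mirroring the __atomic_release_fence() branch above. */
static inline int64_t fetch_add_release_sketch(_Atomic int64_t *v, int64_t i)
{
        atomic_thread_fence(memory_order_release);
        return atomic_fetch_add_explicit(v, i, memory_order_relaxed);
}
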
2858 /**                                           << 
2859  * raw_atomic64_fetch_add_relaxed() - atomic add with relaxed ordering <<
2860  * @i: s64 value to add                       << 
2861  * @v: pointer to atomic64_t                  << 
2862  *                                            << 
2863  * Atomically updates @v to (@v + @i) with relaxed ordering. <<
2864  *                                            << 
2865  * Safe to use in noinstr code; prefer atomic64_fetch_add_relaxed() elsewhere. <<
2866  *                                            << 
2867  * Return: The original value of @v.          << 
2868  */                                           << 
2869 static __always_inline s64                    << 
2870 raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) <<
2871 {                                             << 
2872 #if defined(arch_atomic64_fetch_add_relaxed)  << 
2873         return arch_atomic64_fetch_add_relaxed(i, v); <<
2874 #elif defined(arch_atomic64_fetch_add)        << 
2875         return arch_atomic64_fetch_add(i, v); << 
2876 #else                                         << 
2877 #error "Unable to define raw_atomic64_fetch_add_relaxed" <<
2878 #endif                                           1599 #endif
2879 }                                             << 
2880                                               << 
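
One detail worth keeping straight while reading the kernel-doc in this file: the fetch_*() operations above report the value @v held before the update, while the *_return() helpers further down report the value after it. A tiny stand-alone C11 demonstration (names invented for the example):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        _Atomic int64_t v = 10;

        /* fetch_* style: the pre-update value comes back. */
        int64_t old = atomic_fetch_add(&v, 5);          /* old == 10, v == 15 */

        /* *_return style: the post-update value; in C11 that is just
         * the fetch form adjusted by the operand. */
        int64_t new_val = atomic_fetch_sub(&v, 3) - 3;  /* new_val == 12, v == 12 */

        printf("old=%lld new=%lld v=%lld\n",
               (long long)old, (long long)new_val,
               (long long)atomic_load(&v));
        return 0;
}
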
2881 /**                                           << 
2882  * raw_atomic64_sub() - atomic subtract with  << 
2883  * @i: s64 value to subtract                  << 
2884  * @v: pointer to atomic64_t                  << 
2885  *                                            << 
2886  * Atomically updates @v to (@v - @i) with re << 
2887  *                                            << 
2888  * Safe to use in noinstr code; prefer atomic << 
2889  *                                            << 
2890  * Return: Nothing.                           << 
2891  */                                           << 
2892 static __always_inline void                   << 
2893 raw_atomic64_sub(s64 i, atomic64_t *v)        << 
2894 {                                             << 
2895         arch_atomic64_sub(i, v);              << 
2896 }                                             << 
2897                                                  1600 
2898 /**                                           !! 1601 #ifndef arch_atomic64_sub_return
2899  * raw_atomic64_sub_return() - atomic subtrac << 
2900  * @i: s64 value to subtract                  << 
2901  * @v: pointer to atomic64_t                  << 
2902  *                                            << 
2903  * Atomically updates @v to (@v - @i) with fu << 
2904  *                                            << 
2905  * Safe to use in noinstr code; prefer atomic << 
2906  *                                            << 
2907  * Return: The updated value of @v.           << 
2908  */                                           << 
2909 static __always_inline s64                       1602 static __always_inline s64
2910 raw_atomic64_sub_return(s64 i, atomic64_t *v) !! 1603 arch_atomic64_sub_return(s64 i, atomic64_t *v)
2911 {                                                1604 {
2912 #if defined(arch_atomic64_sub_return)         << 
2913         return arch_atomic64_sub_return(i, v) << 
2914 #elif defined(arch_atomic64_sub_return_relaxe << 
2915         s64 ret;                                 1605         s64 ret;
2916         __atomic_pre_full_fence();               1606         __atomic_pre_full_fence();
2917         ret = arch_atomic64_sub_return_relaxe    1607         ret = arch_atomic64_sub_return_relaxed(i, v);
2918         __atomic_post_full_fence();              1608         __atomic_post_full_fence();
2919         return ret;                              1609         return ret;
2920 #else                                         << 
2921 #error "Unable to define raw_atomic64_sub_ret << 
2922 #endif                                        << 
2923 }                                                1610 }
                                                   >> 1611 #define arch_atomic64_sub_return arch_atomic64_sub_return
                                                   >> 1612 #endif
2924                                                  1613 
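
The right-hand (6.4) column wires its fallbacks with an "#ifndef ladder": whenever an architecture leaves an operation undefined, a generic version is synthesised and then #define'd to its own name so later ifdef tests treat it as provided. The left-hand (6.12) column instead hides the selection inside raw_*() wrappers built from #if defined() chains. A toy, compilable illustration of the ladder idea, with all names invented for the example:

#include <stdatomic.h>

/* Pretend the "architecture" only supplied a relaxed increment. */
#define my_inc_relaxed(v) atomic_fetch_add_explicit((v), 1, memory_order_relaxed)

/* Ladder: synthesise the missing acquire form, then #define it to
 * itself so a later "#ifdef my_inc_acquire" sees it as present. */
#ifndef my_inc_acquire
static inline int my_inc_acquire(_Atomic int *v)
{
        int ret = my_inc_relaxed(v);

        atomic_thread_fence(memory_order_acquire);
        return ret;
}
#define my_inc_acquire my_inc_acquire
#endif

int main(void)
{
        _Atomic int v = 0;

        return my_inc_acquire(&v);      /* returns 0, the pre-increment value */
}
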
2925 /**                                           !! 1614 #endif /* arch_atomic64_sub_return_relaxed */
2926  * raw_atomic64_sub_return_acquire() - atomic !! 1615 
2927  * @i: s64 value to subtract                  !! 1616 #ifndef arch_atomic64_fetch_sub_relaxed
2928  * @v: pointer to atomic64_t                  !! 1617 #define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
2929  *                                            !! 1618 #define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
2930  * Atomically updates @v to (@v - @i) with ac !! 1619 #define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub
2931  *                                            !! 1620 #else /* arch_atomic64_fetch_sub_relaxed */
2932  * Safe to use in noinstr code; prefer atomic !! 1621 
2933  *                                            !! 1622 #ifndef arch_atomic64_fetch_sub_acquire
2934  * Return: The updated value of @v.           << 
2935  */                                           << 
2936 static __always_inline s64                       1623 static __always_inline s64
2937 raw_atomic64_sub_return_acquire(s64 i, atomic !! 1624 arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
2938 {                                                1625 {
2939 #if defined(arch_atomic64_sub_return_acquire) !! 1626         s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
2940         return arch_atomic64_sub_return_acqui << 
2941 #elif defined(arch_atomic64_sub_return_relaxe << 
2942         s64 ret = arch_atomic64_sub_return_re << 
2943         __atomic_acquire_fence();                1627         __atomic_acquire_fence();
2944         return ret;                              1628         return ret;
2945 #elif defined(arch_atomic64_sub_return)       << 
2946         return arch_atomic64_sub_return(i, v) << 
2947 #else                                         << 
2948 #error "Unable to define raw_atomic64_sub_ret << 
2949 #endif                                        << 
2950 }                                                1629 }
                                                   >> 1630 #define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
                                                   >> 1631 #endif
2951                                                  1632 
2952 /**                                           !! 1633 #ifndef arch_atomic64_fetch_sub_release
2953  * raw_atomic64_sub_return_release() - atomic << 
2954  * @i: s64 value to subtract                  << 
2955  * @v: pointer to atomic64_t                  << 
2956  *                                            << 
2957  * Atomically updates @v to (@v - @i) with re << 
2958  *                                            << 
2959  * Safe to use in noinstr code; prefer atomic << 
2960  *                                            << 
2961  * Return: The updated value of @v.           << 
2962  */                                           << 
2963 static __always_inline s64                       1634 static __always_inline s64
2964 raw_atomic64_sub_return_release(s64 i, atomic !! 1635 arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
2965 {                                                1636 {
2966 #if defined(arch_atomic64_sub_return_release) << 
2967         return arch_atomic64_sub_return_relea << 
2968 #elif defined(arch_atomic64_sub_return_relaxe << 
2969         __atomic_release_fence();                1637         __atomic_release_fence();
2970         return arch_atomic64_sub_return_relax !! 1638         return arch_atomic64_fetch_sub_relaxed(i, v);
2971 #elif defined(arch_atomic64_sub_return)       << 
2972         return arch_atomic64_sub_return(i, v) << 
2973 #else                                         << 
2974 #error "Unable to define raw_atomic64_sub_ret << 
2975 #endif                                        << 
2976 }                                                1639 }
2977                                               !! 1640 #define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
2978 /**                                           << 
2979  * raw_atomic64_sub_return_relaxed() - atomic << 
2980  * @i: s64 value to subtract                  << 
2981  * @v: pointer to atomic64_t                  << 
2982  *                                            << 
2983  * Atomically updates @v to (@v - @i) with re << 
2984  *                                            << 
2985  * Safe to use in noinstr code; prefer atomic << 
2986  *                                            << 
2987  * Return: The updated value of @v.           << 
2988  */                                           << 
2989 static __always_inline s64                    << 
2990 raw_atomic64_sub_return_relaxed(s64 i, atomic << 
2991 {                                             << 
2992 #if defined(arch_atomic64_sub_return_relaxed) << 
2993         return arch_atomic64_sub_return_relax << 
2994 #elif defined(arch_atomic64_sub_return)       << 
2995         return arch_atomic64_sub_return(i, v) << 
2996 #else                                         << 
2997 #error "Unable to define raw_atomic64_sub_ret << 
2998 #endif                                           1641 #endif
2999 }                                             << 
3000                                                  1642 
3001 /**                                           !! 1643 #ifndef arch_atomic64_fetch_sub
3002  * raw_atomic64_fetch_sub() - atomic subtract << 
3003  * @i: s64 value to subtract                  << 
3004  * @v: pointer to atomic64_t                  << 
3005  *                                            << 
3006  * Atomically updates @v to (@v - @i) with fu << 
3007  *                                            << 
3008  * Safe to use in noinstr code; prefer atomic << 
3009  *                                            << 
3010  * Return: The original value of @v.          << 
3011  */                                           << 
3012 static __always_inline s64                       1644 static __always_inline s64
3013 raw_atomic64_fetch_sub(s64 i, atomic64_t *v)  !! 1645 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
3014 {                                                1646 {
3015 #if defined(arch_atomic64_fetch_sub)          << 
3016         return arch_atomic64_fetch_sub(i, v); << 
3017 #elif defined(arch_atomic64_fetch_sub_relaxed << 
3018         s64 ret;                                 1647         s64 ret;
3019         __atomic_pre_full_fence();               1648         __atomic_pre_full_fence();
3020         ret = arch_atomic64_fetch_sub_relaxed    1649         ret = arch_atomic64_fetch_sub_relaxed(i, v);
3021         __atomic_post_full_fence();              1650         __atomic_post_full_fence();
3022         return ret;                              1651         return ret;
3023 #else                                         !! 1652 }
3024 #error "Unable to define raw_atomic64_fetch_s !! 1653 #define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
3025 #endif                                           1654 #endif
                                                   >> 1655 
                                                   >> 1656 #endif /* arch_atomic64_fetch_sub_relaxed */
                                                   >> 1657 
                                                   >> 1658 #ifndef arch_atomic64_inc
                                                   >> 1659 static __always_inline void
                                                   >> 1660 arch_atomic64_inc(atomic64_t *v)
                                                   >> 1661 {
                                                   >> 1662         arch_atomic64_add(1, v);
3026 }                                                1663 }
                                                   >> 1664 #define arch_atomic64_inc arch_atomic64_inc
                                                   >> 1665 #endif
3027                                                  1666 
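
The increment/decrement family introduces no new machinery: when an architecture provides no dedicated op, inc/dec and their _return/fetch_ forms are generated by substituting the constant 1 into the corresponding add/sub operation, exactly as arch_atomic64_inc() in the right column above falls back to arch_atomic64_add(1, v). A C11 sketch of that derivation, illustrative only:

#include <stdatomic.h>
#include <stdint.h>

/* inc/dec are just add/sub with i == 1. */
static inline void inc_sketch(_Atomic int64_t *v)
{
        atomic_fetch_add(v, 1);
}

static inline int64_t inc_return_sketch(_Atomic int64_t *v)
{
        return atomic_fetch_add(v, 1) + 1;      /* updated value  */
}

static inline int64_t fetch_inc_sketch(_Atomic int64_t *v)
{
        return atomic_fetch_add(v, 1);          /* original value */
}
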
3028 /**                                           !! 1667 #ifndef arch_atomic64_inc_return_relaxed
3029  * raw_atomic64_fetch_sub_acquire() - atomic  !! 1668 #ifdef arch_atomic64_inc_return
3030  * @i: s64 value to subtract                  !! 1669 #define arch_atomic64_inc_return_acquire arch_atomic64_inc_return
3031  * @v: pointer to atomic64_t                  !! 1670 #define arch_atomic64_inc_return_release arch_atomic64_inc_return
3032  *                                            !! 1671 #define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return
3033  * Atomically updates @v to (@v - @i) with ac !! 1672 #endif /* arch_atomic64_inc_return */
3034  *                                            !! 1673 
3035  * Safe to use in noinstr code; prefer atomic !! 1674 #ifndef arch_atomic64_inc_return
3036  *                                            << 
3037  * Return: The original value of @v.          << 
3038  */                                           << 
3039 static __always_inline s64                       1675 static __always_inline s64
3040 raw_atomic64_fetch_sub_acquire(s64 i, atomic6 !! 1676 arch_atomic64_inc_return(atomic64_t *v)
3041 {                                                1677 {
3042 #if defined(arch_atomic64_fetch_sub_acquire)  !! 1678         return arch_atomic64_add_return(1, v);
3043         return arch_atomic64_fetch_sub_acquir << 
3044 #elif defined(arch_atomic64_fetch_sub_relaxed << 
3045         s64 ret = arch_atomic64_fetch_sub_rel << 
3046         __atomic_acquire_fence();             << 
3047         return ret;                           << 
3048 #elif defined(arch_atomic64_fetch_sub)        << 
3049         return arch_atomic64_fetch_sub(i, v); << 
3050 #else                                         << 
3051 #error "Unable to define raw_atomic64_fetch_s << 
3052 #endif                                        << 
3053 }                                                1679 }
                                                   >> 1680 #define arch_atomic64_inc_return arch_atomic64_inc_return
                                                   >> 1681 #endif
3054                                                  1682 
3055 /**                                           !! 1683 #ifndef arch_atomic64_inc_return_acquire
3056  * raw_atomic64_fetch_sub_release() - atomic  << 
3057  * @i: s64 value to subtract                  << 
3058  * @v: pointer to atomic64_t                  << 
3059  *                                            << 
3060  * Atomically updates @v to (@v - @i) with re << 
3061  *                                            << 
3062  * Safe to use in noinstr code; prefer atomic << 
3063  *                                            << 
3064  * Return: The original value of @v.          << 
3065  */                                           << 
3066 static __always_inline s64                       1684 static __always_inline s64
3067 raw_atomic64_fetch_sub_release(s64 i, atomic6 !! 1685 arch_atomic64_inc_return_acquire(atomic64_t *v)
3068 {                                                1686 {
3069 #if defined(arch_atomic64_fetch_sub_release)  !! 1687         return arch_atomic64_add_return_acquire(1, v);
3070         return arch_atomic64_fetch_sub_releas << 
3071 #elif defined(arch_atomic64_fetch_sub_relaxed << 
3072         __atomic_release_fence();             << 
3073         return arch_atomic64_fetch_sub_relaxe << 
3074 #elif defined(arch_atomic64_fetch_sub)        << 
3075         return arch_atomic64_fetch_sub(i, v); << 
3076 #else                                         << 
3077 #error "Unable to define raw_atomic64_fetch_s << 
3078 #endif                                        << 
3079 }                                                1688 }
                                                   >> 1689 #define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
                                                   >> 1690 #endif
3080                                                  1691 
3081 /**                                           !! 1692 #ifndef arch_atomic64_inc_return_release
3082  * raw_atomic64_fetch_sub_relaxed() - atomic  << 
3083  * @i: s64 value to subtract                  << 
3084  * @v: pointer to atomic64_t                  << 
3085  *                                            << 
3086  * Atomically updates @v to (@v - @i) with re << 
3087  *                                            << 
3088  * Safe to use in noinstr code; prefer atomic << 
3089  *                                            << 
3090  * Return: The original value of @v.          << 
3091  */                                           << 
3092 static __always_inline s64                       1693 static __always_inline s64
3093 raw_atomic64_fetch_sub_relaxed(s64 i, atomic6 !! 1694 arch_atomic64_inc_return_release(atomic64_t *v)
3094 {                                                1695 {
3095 #if defined(arch_atomic64_fetch_sub_relaxed)  !! 1696         return arch_atomic64_add_return_release(1, v);
3096         return arch_atomic64_fetch_sub_relaxe !! 1697 }
3097 #elif defined(arch_atomic64_fetch_sub)        !! 1698 #define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
3098         return arch_atomic64_fetch_sub(i, v); << 
3099 #else                                         << 
3100 #error "Unable to define raw_atomic64_fetch_s << 
3101 #endif                                           1699 #endif
                                                   >> 1700 
                                                   >> 1701 #ifndef arch_atomic64_inc_return_relaxed
                                                   >> 1702 static __always_inline s64
                                                   >> 1703 arch_atomic64_inc_return_relaxed(atomic64_t *v)
                                                   >> 1704 {
                                                   >> 1705         return arch_atomic64_add_return_relaxed(1, v);
3102 }                                                1706 }
                                                   >> 1707 #define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
                                                   >> 1708 #endif
3103                                                  1709 
3104 /**                                           !! 1710 #else /* arch_atomic64_inc_return_relaxed */
3105  * raw_atomic64_inc() - atomic increment with !! 1711 
3106  * @v: pointer to atomic64_t                  !! 1712 #ifndef arch_atomic64_inc_return_acquire
3107  *                                            !! 1713 static __always_inline s64
3108  * Atomically updates @v to (@v + 1) with rel !! 1714 arch_atomic64_inc_return_acquire(atomic64_t *v)
3109  *                                            << 
3110  * Safe to use in noinstr code; prefer atomic << 
3111  *                                            << 
3112  * Return: Nothing.                           << 
3113  */                                           << 
3114 static __always_inline void                   << 
3115 raw_atomic64_inc(atomic64_t *v)               << 
3116 {                                                1715 {
3117 #if defined(arch_atomic64_inc)                !! 1716         s64 ret = arch_atomic64_inc_return_relaxed(v);
3118         arch_atomic64_inc(v);                 !! 1717         __atomic_acquire_fence();
3119 #else                                         !! 1718         return ret;
3120         raw_atomic64_add(1, v);               !! 1719 }
                                                   >> 1720 #define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
3121 #endif                                           1721 #endif
                                                   >> 1722 
                                                   >> 1723 #ifndef arch_atomic64_inc_return_release
                                                   >> 1724 static __always_inline s64
                                                   >> 1725 arch_atomic64_inc_return_release(atomic64_t *v)
                                                   >> 1726 {
                                                   >> 1727         __atomic_release_fence();
                                                   >> 1728         return arch_atomic64_inc_return_relaxed(v);
3122 }                                                1729 }
                                                   >> 1730 #define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
                                                   >> 1731 #endif
3123                                                  1732 
3124 /**                                           !! 1733 #ifndef arch_atomic64_inc_return
3125  * raw_atomic64_inc_return() - atomic increme << 
3126  * @v: pointer to atomic64_t                  << 
3127  *                                            << 
3128  * Atomically updates @v to (@v + 1) with ful << 
3129  *                                            << 
3130  * Safe to use in noinstr code; prefer atomic << 
3131  *                                            << 
3132  * Return: The updated value of @v.           << 
3133  */                                           << 
3134 static __always_inline s64                       1734 static __always_inline s64
3135 raw_atomic64_inc_return(atomic64_t *v)        !! 1735 arch_atomic64_inc_return(atomic64_t *v)
3136 {                                                1736 {
3137 #if defined(arch_atomic64_inc_return)         << 
3138         return arch_atomic64_inc_return(v);   << 
3139 #elif defined(arch_atomic64_inc_return_relaxe << 
3140         s64 ret;                                 1737         s64 ret;
3141         __atomic_pre_full_fence();               1738         __atomic_pre_full_fence();
3142         ret = arch_atomic64_inc_return_relaxe    1739         ret = arch_atomic64_inc_return_relaxed(v);
3143         __atomic_post_full_fence();              1740         __atomic_post_full_fence();
3144         return ret;                              1741         return ret;
3145 #else                                         << 
3146         return raw_atomic64_add_return(1, v); << 
3147 #endif                                        << 
3148 }                                                1742 }
                                                   >> 1743 #define arch_atomic64_inc_return arch_atomic64_inc_return
                                                   >> 1744 #endif
3149                                                  1745 
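
A typical consumer of the *_inc_return() shape (not taken from this file) is a monotonically increasing ID or sequence counter; when the returned number is not used to publish other data, the _relaxed variant is sufficient. A hypothetical C11 sketch:

#include <stdatomic.h>
#include <stdint.h>

static _Atomic int64_t next_id;

/* Hand out distinct, increasing identifiers; relaxed ordering is
 * enough because nothing else is published through the counter. */
static inline int64_t alloc_id(void)
{
        return atomic_fetch_add_explicit(&next_id, 1,
                                         memory_order_relaxed) + 1;
}
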
3150 /**                                           !! 1746 #endif /* arch_atomic64_inc_return_relaxed */
3151  * raw_atomic64_inc_return_acquire() - atomic !! 1747 
3152  * @v: pointer to atomic64_t                  !! 1748 #ifndef arch_atomic64_fetch_inc_relaxed
3153  *                                            !! 1749 #ifdef arch_atomic64_fetch_inc
3154  * Atomically updates @v to (@v + 1) with acq !! 1750 #define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc
3155  *                                            !! 1751 #define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc
3156  * Safe to use in noinstr code; prefer atomic !! 1752 #define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc
3157  *                                            !! 1753 #endif /* arch_atomic64_fetch_inc */
3158  * Return: The updated value of @v.           !! 1754 
3159  */                                           !! 1755 #ifndef arch_atomic64_fetch_inc
3160 static __always_inline s64                       1756 static __always_inline s64
3161 raw_atomic64_inc_return_acquire(atomic64_t *v !! 1757 arch_atomic64_fetch_inc(atomic64_t *v)
3162 {                                                1758 {
3163 #if defined(arch_atomic64_inc_return_acquire) !! 1759         return arch_atomic64_fetch_add(1, v);
3164         return arch_atomic64_inc_return_acqui << 
3165 #elif defined(arch_atomic64_inc_return_relaxe << 
3166         s64 ret = arch_atomic64_inc_return_re << 
3167         __atomic_acquire_fence();             << 
3168         return ret;                           << 
3169 #elif defined(arch_atomic64_inc_return)       << 
3170         return arch_atomic64_inc_return(v);   << 
3171 #else                                         << 
3172         return raw_atomic64_add_return_acquir << 
3173 #endif                                        << 
3174 }                                                1760 }
                                                   >> 1761 #define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
                                                   >> 1762 #endif
3175                                                  1763 
3176 /**                                           !! 1764 #ifndef arch_atomic64_fetch_inc_acquire
3177  * raw_atomic64_inc_return_release() - atomic << 
3178  * @v: pointer to atomic64_t                  << 
3179  *                                            << 
3180  * Atomically updates @v to (@v + 1) with rel << 
3181  *                                            << 
3182  * Safe to use in noinstr code; prefer atomic << 
3183  *                                            << 
3184  * Return: The updated value of @v.           << 
3185  */                                           << 
3186 static __always_inline s64                       1765 static __always_inline s64
3187 raw_atomic64_inc_return_release(atomic64_t *v !! 1766 arch_atomic64_fetch_inc_acquire(atomic64_t *v)
3188 {                                                1767 {
3189 #if defined(arch_atomic64_inc_return_release) !! 1768         return arch_atomic64_fetch_add_acquire(1, v);
3190         return arch_atomic64_inc_return_relea << 
3191 #elif defined(arch_atomic64_inc_return_relaxe << 
3192         __atomic_release_fence();             << 
3193         return arch_atomic64_inc_return_relax << 
3194 #elif defined(arch_atomic64_inc_return)       << 
3195         return arch_atomic64_inc_return(v);   << 
3196 #else                                         << 
3197         return raw_atomic64_add_return_releas << 
3198 #endif                                        << 
3199 }                                                1769 }
                                                   >> 1770 #define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
                                                   >> 1771 #endif
3200                                                  1772 
3201 /**                                           !! 1773 #ifndef arch_atomic64_fetch_inc_release
3202  * raw_atomic64_inc_return_relaxed() - atomic << 
3203  * @v: pointer to atomic64_t                  << 
3204  *                                            << 
3205  * Atomically updates @v to (@v + 1) with rel << 
3206  *                                            << 
3207  * Safe to use in noinstr code; prefer atomic << 
3208  *                                            << 
3209  * Return: The updated value of @v.           << 
3210  */                                           << 
3211 static __always_inline s64                       1774 static __always_inline s64
3212 raw_atomic64_inc_return_relaxed(atomic64_t *v !! 1775 arch_atomic64_fetch_inc_release(atomic64_t *v)
3213 {                                                1776 {
3214 #if defined(arch_atomic64_inc_return_relaxed) !! 1777         return arch_atomic64_fetch_add_release(1, v);
3215         return arch_atomic64_inc_return_relax << 
3216 #elif defined(arch_atomic64_inc_return)       << 
3217         return arch_atomic64_inc_return(v);   << 
3218 #else                                         << 
3219         return raw_atomic64_add_return_relaxe << 
3220 #endif                                        << 
3221 }                                                1778 }
                                                   >> 1779 #define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
                                                   >> 1780 #endif
3222                                                  1781 
3223 /**                                           !! 1782 #ifndef arch_atomic64_fetch_inc_relaxed
3224  * raw_atomic64_fetch_inc() - atomic incremen << 
3225  * @v: pointer to atomic64_t                  << 
3226  *                                            << 
3227  * Atomically updates @v to (@v + 1) with ful << 
3228  *                                            << 
3229  * Safe to use in noinstr code; prefer atomic << 
3230  *                                            << 
3231  * Return: The original value of @v.          << 
3232  */                                           << 
3233 static __always_inline s64                       1783 static __always_inline s64
3234 raw_atomic64_fetch_inc(atomic64_t *v)         !! 1784 arch_atomic64_fetch_inc_relaxed(atomic64_t *v)
3235 {                                                1785 {
3236 #if defined(arch_atomic64_fetch_inc)          !! 1786         return arch_atomic64_fetch_add_relaxed(1, v);
3237         return arch_atomic64_fetch_inc(v);    << 
3238 #elif defined(arch_atomic64_fetch_inc_relaxed << 
3239         s64 ret;                              << 
3240         __atomic_pre_full_fence();            << 
3241         ret = arch_atomic64_fetch_inc_relaxed << 
3242         __atomic_post_full_fence();           << 
3243         return ret;                           << 
3244 #else                                         << 
3245         return raw_atomic64_fetch_add(1, v);  << 
3246 #endif                                        << 
3247 }                                                1787 }
                                                   >> 1788 #define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed
                                                   >> 1789 #endif
3248                                                  1790 
3249 /**                                           !! 1791 #else /* arch_atomic64_fetch_inc_relaxed */
3250  * raw_atomic64_fetch_inc_acquire() - atomic  !! 1792 
3251  * @v: pointer to atomic64_t                  !! 1793 #ifndef arch_atomic64_fetch_inc_acquire
3252  *                                            << 
3253  * Atomically updates @v to (@v + 1) with acq << 
3254  *                                            << 
3255  * Safe to use in noinstr code; prefer atomic << 
3256  *                                            << 
3257  * Return: The original value of @v.          << 
3258  */                                           << 
3259 static __always_inline s64                       1794 static __always_inline s64
3260 raw_atomic64_fetch_inc_acquire(atomic64_t *v) !! 1795 arch_atomic64_fetch_inc_acquire(atomic64_t *v)
3261 {                                                1796 {
3262 #if defined(arch_atomic64_fetch_inc_acquire)  << 
3263         return arch_atomic64_fetch_inc_acquir << 
3264 #elif defined(arch_atomic64_fetch_inc_relaxed << 
3265         s64 ret = arch_atomic64_fetch_inc_rel    1797         s64 ret = arch_atomic64_fetch_inc_relaxed(v);
3266         __atomic_acquire_fence();                1798         __atomic_acquire_fence();
3267         return ret;                              1799         return ret;
3268 #elif defined(arch_atomic64_fetch_inc)        << 
3269         return arch_atomic64_fetch_inc(v);    << 
3270 #else                                         << 
3271         return raw_atomic64_fetch_add_acquire << 
3272 #endif                                        << 
3273 }                                                1800 }
                                                   >> 1801 #define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
                                                   >> 1802 #endif
3274                                                  1803 
3275 /**                                           !! 1804 #ifndef arch_atomic64_fetch_inc_release
3276  * raw_atomic64_fetch_inc_release() - atomic  << 
3277  * @v: pointer to atomic64_t                  << 
3278  *                                            << 
3279  * Atomically updates @v to (@v + 1) with rel << 
3280  *                                            << 
3281  * Safe to use in noinstr code; prefer atomic << 
3282  *                                            << 
3283  * Return: The original value of @v.          << 
3284  */                                           << 
3285 static __always_inline s64                       1805 static __always_inline s64
3286 raw_atomic64_fetch_inc_release(atomic64_t *v) !! 1806 arch_atomic64_fetch_inc_release(atomic64_t *v)
3287 {                                                1807 {
3288 #if defined(arch_atomic64_fetch_inc_release)  << 
3289         return arch_atomic64_fetch_inc_releas << 
3290 #elif defined(arch_atomic64_fetch_inc_relaxed << 
3291         __atomic_release_fence();                1808         __atomic_release_fence();
3292         return arch_atomic64_fetch_inc_relaxe    1809         return arch_atomic64_fetch_inc_relaxed(v);
3293 #elif defined(arch_atomic64_fetch_inc)        << 
3294         return arch_atomic64_fetch_inc(v);    << 
3295 #else                                         << 
3296         return raw_atomic64_fetch_add_release << 
3297 #endif                                        << 
3298 }                                                1810 }
                                                   >> 1811 #define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
                                                   >> 1812 #endif
3299                                                  1813 
3300 /**                                           !! 1814 #ifndef arch_atomic64_fetch_inc
3301  * raw_atomic64_fetch_inc_relaxed() - atomic  << 
3302  * @v: pointer to atomic64_t                  << 
3303  *                                            << 
3304  * Atomically updates @v to (@v + 1) with rel << 
3305  *                                            << 
3306  * Safe to use in noinstr code; prefer atomic << 
3307  *                                            << 
3308  * Return: The original value of @v.          << 
3309  */                                           << 
3310 static __always_inline s64                       1815 static __always_inline s64
3311 raw_atomic64_fetch_inc_relaxed(atomic64_t *v) !! 1816 arch_atomic64_fetch_inc(atomic64_t *v)
3312 {                                                1817 {
3313 #if defined(arch_atomic64_fetch_inc_relaxed)  !! 1818         s64 ret;
3314         return arch_atomic64_fetch_inc_relaxe !! 1819         __atomic_pre_full_fence();
3315 #elif defined(arch_atomic64_fetch_inc)        !! 1820         ret = arch_atomic64_fetch_inc_relaxed(v);
3316         return arch_atomic64_fetch_inc(v);    !! 1821         __atomic_post_full_fence();
3317 #else                                         !! 1822         return ret;
3318         return raw_atomic64_fetch_add_relaxed << 
3319 #endif                                        << 
3320 }                                                1823 }
                                                   >> 1824 #define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
                                                   >> 1825 #endif
3321                                                  1826 
3322 /**                                           !! 1827 #endif /* arch_atomic64_fetch_inc_relaxed */
3323  * raw_atomic64_dec() - atomic decrement with !! 1828 
3324  * @v: pointer to atomic64_t                  !! 1829 #ifndef arch_atomic64_dec
3325  *                                            << 
3326  * Atomically updates @v to (@v - 1) with rel << 
3327  *                                            << 
3328  * Safe to use in noinstr code; prefer atomic << 
3329  *                                            << 
3330  * Return: Nothing.                           << 
3331  */                                           << 
3332 static __always_inline void                      1830 static __always_inline void
3333 raw_atomic64_dec(atomic64_t *v)               !! 1831 arch_atomic64_dec(atomic64_t *v)
                                                   >> 1832 {
                                                   >> 1833         arch_atomic64_sub(1, v);
                                                   >> 1834 }
                                                   >> 1835 #define arch_atomic64_dec arch_atomic64_dec
                                                   >> 1836 #endif
                                                   >> 1837 
                                                   >> 1838 #ifndef arch_atomic64_dec_return_relaxed
                                                   >> 1839 #ifdef arch_atomic64_dec_return
                                                   >> 1840 #define arch_atomic64_dec_return_acquire arch_atomic64_dec_return
                                                   >> 1841 #define arch_atomic64_dec_return_release arch_atomic64_dec_return
                                                   >> 1842 #define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return
                                                   >> 1843 #endif /* arch_atomic64_dec_return */
                                                   >> 1844 
                                                   >> 1845 #ifndef arch_atomic64_dec_return
                                                   >> 1846 static __always_inline s64
                                                   >> 1847 arch_atomic64_dec_return(atomic64_t *v)
3334 {                                                1848 {
3335 #if defined(arch_atomic64_dec)                !! 1849         return arch_atomic64_sub_return(1, v);
3336         arch_atomic64_dec(v);                 !! 1850 }
3337 #else                                         !! 1851 #define arch_atomic64_dec_return arch_atomic64_dec_return
3338         raw_atomic64_sub(1, v);               << 
3339 #endif                                           1852 #endif
                                                   >> 1853 
                                                   >> 1854 #ifndef arch_atomic64_dec_return_acquire
                                                   >> 1855 static __always_inline s64
                                                   >> 1856 arch_atomic64_dec_return_acquire(atomic64_t *v)
                                                   >> 1857 {
                                                   >> 1858         return arch_atomic64_sub_return_acquire(1, v);
3340 }                                                1859 }
                                                   >> 1860 #define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
                                                   >> 1861 #endif
3341                                                  1862 
3342 /**                                           !! 1863 #ifndef arch_atomic64_dec_return_release
3343  * raw_atomic64_dec_return() - atomic decreme << 
3344  * @v: pointer to atomic64_t                  << 
3345  *                                            << 
3346  * Atomically updates @v to (@v - 1) with ful << 
3347  *                                            << 
3348  * Safe to use in noinstr code; prefer atomic << 
3349  *                                            << 
3350  * Return: The updated value of @v.           << 
3351  */                                           << 
3352 static __always_inline s64                       1864 static __always_inline s64
3353 raw_atomic64_dec_return(atomic64_t *v)        !! 1865 arch_atomic64_dec_return_release(atomic64_t *v)
3354 {                                                1866 {
3355 #if defined(arch_atomic64_dec_return)         !! 1867         return arch_atomic64_sub_return_release(1, v);
3356         return arch_atomic64_dec_return(v);   !! 1868 }
3357 #elif defined(arch_atomic64_dec_return_relaxe !! 1869 #define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
3358         s64 ret;                              << 
3359         __atomic_pre_full_fence();            << 
3360         ret = arch_atomic64_dec_return_relaxe << 
3361         __atomic_post_full_fence();           << 
3362         return ret;                           << 
3363 #else                                         << 
3364         return raw_atomic64_sub_return(1, v); << 
3365 #endif                                           1870 #endif
                                                   >> 1871 
                                                   >> 1872 #ifndef arch_atomic64_dec_return_relaxed
                                                   >> 1873 static __always_inline s64
                                                   >> 1874 arch_atomic64_dec_return_relaxed(atomic64_t *v)
                                                   >> 1875 {
                                                   >> 1876         return arch_atomic64_sub_return_relaxed(1, v);
3366 }                                                1877 }
                                                   >> 1878 #define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
                                                   >> 1879 #endif
3367                                                  1880 
3368 /**                                           !! 1881 #else /* arch_atomic64_dec_return_relaxed */
3369  * raw_atomic64_dec_return_acquire() - atomic !! 1882 
3370  * @v: pointer to atomic64_t                  !! 1883 #ifndef arch_atomic64_dec_return_acquire
3371  *                                            << 
3372  * Atomically updates @v to (@v - 1) with acq << 
3373  *                                            << 
3374  * Safe to use in noinstr code; prefer atomic << 
3375  *                                            << 
3376  * Return: The updated value of @v.           << 
3377  */                                           << 
3378 static __always_inline s64                       1884 static __always_inline s64
3379 raw_atomic64_dec_return_acquire(atomic64_t *v !! 1885 arch_atomic64_dec_return_acquire(atomic64_t *v)
3380 {                                                1886 {
3381 #if defined(arch_atomic64_dec_return_acquire) << 
3382         return arch_atomic64_dec_return_acqui << 
3383 #elif defined(arch_atomic64_dec_return_relaxe << 
3384         s64 ret = arch_atomic64_dec_return_re    1887         s64 ret = arch_atomic64_dec_return_relaxed(v);
3385         __atomic_acquire_fence();                1888         __atomic_acquire_fence();
3386         return ret;                              1889         return ret;
3387 #elif defined(arch_atomic64_dec_return)       << 
3388         return arch_atomic64_dec_return(v);   << 
3389 #else                                         << 
3390         return raw_atomic64_sub_return_acquir << 
3391 #endif                                        << 
3392 }                                                1890 }
                                                   >> 1891 #define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
                                                   >> 1892 #endif
3393                                                  1893 
3394 /**                                           !! 1894 #ifndef arch_atomic64_dec_return_release
3395  * raw_atomic64_dec_return_release() - atomic << 
3396  * @v: pointer to atomic64_t                  << 
3397  *                                            << 
3398  * Atomically updates @v to (@v - 1) with rel << 
3399  *                                            << 
3400  * Safe to use in noinstr code; prefer atomic << 
3401  *                                            << 
3402  * Return: The updated value of @v.           << 
3403  */                                           << 
3404 static __always_inline s64                       1895 static __always_inline s64
3405 raw_atomic64_dec_return_release(atomic64_t *v !! 1896 arch_atomic64_dec_return_release(atomic64_t *v)
3406 {                                                1897 {
3407 #if defined(arch_atomic64_dec_return_release) << 
3408         return arch_atomic64_dec_return_relea << 
3409 #elif defined(arch_atomic64_dec_return_relaxe << 
3410         __atomic_release_fence();                1898         __atomic_release_fence();
3411         return arch_atomic64_dec_return_relax    1899         return arch_atomic64_dec_return_relaxed(v);
3412 #elif defined(arch_atomic64_dec_return)       << 
3413         return arch_atomic64_dec_return(v);   << 
3414 #else                                         << 
3415         return raw_atomic64_sub_return_releas << 
3416 #endif                                        << 
3417 }                                                1900 }
3418                                               !! 1901 #define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
3419 /**                                           << 
3420  * raw_atomic64_dec_return_relaxed() - atomic << 
3421  * @v: pointer to atomic64_t                  << 
3422  *                                            << 
3423  * Atomically updates @v to (@v - 1) with rel << 
3424  *                                            << 
3425  * Safe to use in noinstr code; prefer atomic << 
3426  *                                            << 
3427  * Return: The updated value of @v.           << 
3428  */                                           << 
3429 static __always_inline s64                    << 
3430 raw_atomic64_dec_return_relaxed(atomic64_t *v << 
3431 {                                             << 
3432 #if defined(arch_atomic64_dec_return_relaxed) << 
3433         return arch_atomic64_dec_return_relax << 
3434 #elif defined(arch_atomic64_dec_return)       << 
3435         return arch_atomic64_dec_return(v);   << 
3436 #else                                         << 
3437         return raw_atomic64_sub_return_relaxe << 
3438 #endif                                           1902 #endif
3439 }                                             << 
3440                                                  1903 
3441 /**                                           !! 1904 #ifndef arch_atomic64_dec_return
3442  * raw_atomic64_fetch_dec() - atomic decremen << 
3443  * @v: pointer to atomic64_t                  << 
3444  *                                            << 
3445  * Atomically updates @v to (@v - 1) with ful << 
3446  *                                            << 
3447  * Safe to use in noinstr code; prefer atomic << 
3448  *                                            << 
3449  * Return: The original value of @v.          << 
3450  */                                           << 
3451 static __always_inline s64                       1905 static __always_inline s64
3452 raw_atomic64_fetch_dec(atomic64_t *v)         !! 1906 arch_atomic64_dec_return(atomic64_t *v)
3453 {                                                1907 {
3454 #if defined(arch_atomic64_fetch_dec)          << 
3455         return arch_atomic64_fetch_dec(v);    << 
3456 #elif defined(arch_atomic64_fetch_dec_relaxed << 
3457         s64 ret;                                 1908         s64 ret;
3458         __atomic_pre_full_fence();               1909         __atomic_pre_full_fence();
3459         ret = arch_atomic64_fetch_dec_relaxed !! 1910         ret = arch_atomic64_dec_return_relaxed(v);
3460         __atomic_post_full_fence();              1911         __atomic_post_full_fence();
3461         return ret;                              1912         return ret;
3462 #else                                         << 
3463         return raw_atomic64_fetch_sub(1, v);  << 
3464 #endif                                        << 
3465 }                                                1913 }
                                                   >> 1914 #define arch_atomic64_dec_return arch_atomic64_dec_return
                                                   >> 1915 #endif
3466                                                  1916 
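
The value returned by fetch_dec()/dec_return() is what makes reference-count style code possible: only the caller that observes the count reaching zero tears the object down. In real kernel code refcount_t is the preferred interface for this; the open-coded C11 sketch below (hypothetical names) only illustrates why the ordering and the returned value matter:

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

struct obj {
        _Atomic int64_t refs;
        /* ... payload ... */
};

/* Drop one reference; the caller seeing the old value 1 is the last
 * user and frees the object.  Release on the decrement plus an
 * acquire fence before the free is the usual refcount protocol. */
static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub_explicit(&o->refs, 1, memory_order_release) == 1) {
                atomic_thread_fence(memory_order_acquire);
                free(o);
        }
}
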
3467 /**                                           !! 1917 #endif /* arch_atomic64_dec_return_relaxed */
3468  * raw_atomic64_fetch_dec_acquire() - atomic  !! 1918 
3469  * @v: pointer to atomic64_t                  !! 1919 #ifndef arch_atomic64_fetch_dec_relaxed
3470  *                                            !! 1920 #ifdef arch_atomic64_fetch_dec
3471  * Atomically updates @v to (@v - 1) with acq !! 1921 #define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec
3472  *                                            !! 1922 #define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec
3473  * Safe to use in noinstr code; prefer atomic !! 1923 #define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec
3474  *                                            !! 1924 #endif /* arch_atomic64_fetch_dec */
3475  * Return: The original value of @v.          !! 1925 
3476  */                                           !! 1926 #ifndef arch_atomic64_fetch_dec
3477 static __always_inline s64                       1927 static __always_inline s64
3478 raw_atomic64_fetch_dec_acquire(atomic64_t *v) !! 1928 arch_atomic64_fetch_dec(atomic64_t *v)
3479 {                                                1929 {
3480 #if defined(arch_atomic64_fetch_dec_acquire)  !! 1930         return arch_atomic64_fetch_sub(1, v);
3481         return arch_atomic64_fetch_dec_acquir << 
3482 #elif defined(arch_atomic64_fetch_dec_relaxed << 
3483         s64 ret = arch_atomic64_fetch_dec_rel << 
3484         __atomic_acquire_fence();             << 
3485         return ret;                           << 
3486 #elif defined(arch_atomic64_fetch_dec)        << 
3487         return arch_atomic64_fetch_dec(v);    << 
3488 #else                                         << 
3489         return raw_atomic64_fetch_sub_acquire << 
3490 #endif                                        << 
3491 }                                                1931 }
                                                   >> 1932 #define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
                                                   >> 1933 #endif
3492                                                  1934 
3493 /**                                           !! 1935 #ifndef arch_atomic64_fetch_dec_acquire
3494  * raw_atomic64_fetch_dec_release() - atomic  << 
3495  * @v: pointer to atomic64_t                  << 
3496  *                                            << 
3497  * Atomically updates @v to (@v - 1) with rel << 
3498  *                                            << 
3499  * Safe to use in noinstr code; prefer atomic << 
3500  *                                            << 
3501  * Return: The original value of @v.          << 
3502  */                                           << 
3503 static __always_inline s64                       1936 static __always_inline s64
3504 raw_atomic64_fetch_dec_release(atomic64_t *v) !! 1937 arch_atomic64_fetch_dec_acquire(atomic64_t *v)
3505 {                                                1938 {
3506 #if defined(arch_atomic64_fetch_dec_release)  !! 1939         return arch_atomic64_fetch_sub_acquire(1, v);
3507         return arch_atomic64_fetch_dec_releas !! 1940 }
3508 #elif defined(arch_atomic64_fetch_dec_relaxed !! 1941 #define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
3509         __atomic_release_fence();             << 
3510         return arch_atomic64_fetch_dec_relaxe << 
3511 #elif defined(arch_atomic64_fetch_dec)        << 
3512         return arch_atomic64_fetch_dec(v);    << 
3513 #else                                         << 
3514         return raw_atomic64_fetch_sub_release << 
3515 #endif                                           1942 #endif
                                                   >> 1943 
                                                   >> 1944 #ifndef arch_atomic64_fetch_dec_release
                                                   >> 1945 static __always_inline s64
                                                   >> 1946 arch_atomic64_fetch_dec_release(atomic64_t *v)
                                                   >> 1947 {
                                                   >> 1948         return arch_atomic64_fetch_sub_release(1, v);
3516 }                                                1949 }
                                                   >> 1950 #define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
                                                   >> 1951 #endif
3517                                                  1952 
3518 /**                                           !! 1953 #ifndef arch_atomic64_fetch_dec_relaxed
3519  * raw_atomic64_fetch_dec_relaxed() - atomic  << 
3520  * @v: pointer to atomic64_t                  << 
3521  *                                            << 
3522  * Atomically updates @v to (@v - 1) with rel << 
3523  *                                            << 
3524  * Safe to use in noinstr code; prefer atomic << 
3525  *                                            << 
3526  * Return: The original value of @v.          << 
3527  */                                           << 
3528 static __always_inline s64                       1954 static __always_inline s64
3529 raw_atomic64_fetch_dec_relaxed(atomic64_t *v) !! 1955 arch_atomic64_fetch_dec_relaxed(atomic64_t *v)
3530 {                                                1956 {
3531 #if defined(arch_atomic64_fetch_dec_relaxed)  !! 1957         return arch_atomic64_fetch_sub_relaxed(1, v);
3532         return arch_atomic64_fetch_dec_relaxe !! 1958 }
3533 #elif defined(arch_atomic64_fetch_dec)        !! 1959 #define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed
3534         return arch_atomic64_fetch_dec(v);    << 
3535 #else                                         << 
3536         return raw_atomic64_fetch_sub_relaxed << 
3537 #endif                                           1960 #endif
                                                   >> 1961 
                                                   >> 1962 #else /* arch_atomic64_fetch_dec_relaxed */
                                                   >> 1963 
                                                   >> 1964 #ifndef arch_atomic64_fetch_dec_acquire
                                                   >> 1965 static __always_inline s64
                                                   >> 1966 arch_atomic64_fetch_dec_acquire(atomic64_t *v)
                                                   >> 1967 {
                                                   >> 1968         s64 ret = arch_atomic64_fetch_dec_relaxed(v);
                                                   >> 1969         __atomic_acquire_fence();
                                                   >> 1970         return ret;
3538 }                                                1971 }
                                                   >> 1972 #define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
                                                   >> 1973 #endif
3539                                                  1974 
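/*
 * [Editor's illustrative sketch -- not part of the generated header.]
 * The fetch_dec family above returns the value @v held *before* the
 * decrement, which is what lets a caller detect the final put on a
 * reference count. The helper below is hypothetical; ordinary
 * (non-noinstr) code would use atomic64_fetch_dec_release() rather than
 * the raw_ variant, and a real refcount teardown would typically pair
 * the "last put" path with an acquire fence (or simply use refcount_t).
 */
static __always_inline bool example_put_ref(atomic64_t *refs)
{
	/* True only for the caller that dropped the count from 1 to 0. */
	return raw_atomic64_fetch_dec_release(refs) == 1;
}
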
3540 /**                                           !! 1975 #ifndef arch_atomic64_fetch_dec_release
3541  * raw_atomic64_and() - atomic bitwise AND with relaxed ordering !! 1976 static __always_inline s64
3542  * @i: s64 value                              !! 1977 arch_atomic64_fetch_dec_release(atomic64_t *v)
3543  * @v: pointer to atomic64_t                  << 
3544  *                                            << 
3545  * Atomically updates @v to (@v & @i) with relaxed ordering. <<
3546  *                                            << 
3547  * Safe to use in noinstr code; prefer atomic64_and() elsewhere. <<
3548  *                                            << 
3549  * Return: Nothing.                           << 
3550  */                                           << 
3551 static __always_inline void                   << 
3552 raw_atomic64_and(s64 i, atomic64_t *v)        << 
3553 {                                                1978 {
3554         arch_atomic64_and(i, v);              !! 1979         __atomic_release_fence();
                                                   >> 1980         return arch_atomic64_fetch_dec_relaxed(v);
3555 }                                                1981 }
                                                   >> 1982 #define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
                                                   >> 1983 #endif
3556                                                  1984 
3557 /**                                           !! 1985 #ifndef arch_atomic64_fetch_dec
3558  * raw_atomic64_fetch_and() - atomic bitwise AND with full ordering <<
3559  * @i: s64 value                              << 
3560  * @v: pointer to atomic64_t                  << 
3561  *                                            << 
3562  * Atomically updates @v to (@v & @i) with full ordering. <<
3563  *                                            << 
3564  * Safe to use in noinstr code; prefer atomic64_fetch_and() elsewhere. <<
3565  *                                            << 
3566  * Return: The original value of @v.          << 
3567  */                                           << 
3568 static __always_inline s64                       1986 static __always_inline s64
3569 raw_atomic64_fetch_and(s64 i, atomic64_t *v)  !! 1987 arch_atomic64_fetch_dec(atomic64_t *v)
3570 {                                                1988 {
3571 #if defined(arch_atomic64_fetch_and)          << 
3572         return arch_atomic64_fetch_and(i, v); << 
3573 #elif defined(arch_atomic64_fetch_and_relaxed) << 
3574         s64 ret;                                 1989         s64 ret;
3575         __atomic_pre_full_fence();               1990         __atomic_pre_full_fence();
3576         ret = arch_atomic64_fetch_and_relaxed(i, v); !! 1991         ret = arch_atomic64_fetch_dec_relaxed(v);
3577         __atomic_post_full_fence();              1992         __atomic_post_full_fence();
3578         return ret;                              1993         return ret;
3579 #else                                         << 
3580 #error "Unable to define raw_atomic64_fetch_and" << 
3581 #endif                                        << 
3582 }                                                1994 }
                                                   >> 1995 #define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
                                                   >> 1996 #endif
3583                                                  1997 
3584 /**                                           !! 1998 #endif /* arch_atomic64_fetch_dec_relaxed */
3585  * raw_atomic64_fetch_and_acquire() - atomic  !! 1999 
3586  * @i: s64 value                              !! 2000 #ifndef arch_atomic64_fetch_and_relaxed
3587  * @v: pointer to atomic64_t                  !! 2001 #define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
3588  *                                            !! 2002 #define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
3589  * Atomically updates @v to (@v & @i) with ac !! 2003 #define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and
3590  *                                            !! 2004 #else /* arch_atomic64_fetch_and_relaxed */
3591  * Safe to use in noinstr code; prefer atomic !! 2005 
3592  *                                            !! 2006 #ifndef arch_atomic64_fetch_and_acquire
3593  * Return: The original value of @v.          << 
3594  */                                           << 
3595 static __always_inline s64                       2007 static __always_inline s64
3596 raw_atomic64_fetch_and_acquire(s64 i, atomic6 !! 2008 arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
3597 {                                                2009 {
3598 #if defined(arch_atomic64_fetch_and_acquire)  << 
3599         return arch_atomic64_fetch_and_acquir << 
3600 #elif defined(arch_atomic64_fetch_and_relaxed << 
3601         s64 ret = arch_atomic64_fetch_and_rel    2010         s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
3602         __atomic_acquire_fence();                2011         __atomic_acquire_fence();
3603         return ret;                              2012         return ret;
3604 #elif defined(arch_atomic64_fetch_and)        << 
3605         return arch_atomic64_fetch_and(i, v); << 
3606 #else                                         << 
3607 #error "Unable to define raw_atomic64_fetch_a << 
3608 #endif                                        << 
3609 }                                                2013 }
                                                   >> 2014 #define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
                                                   >> 2015 #endif
3610                                                  2016 
3611 /**                                           !! 2017 #ifndef arch_atomic64_fetch_and_release
3612  * raw_atomic64_fetch_and_release() - atomic  << 
3613  * @i: s64 value                              << 
3614  * @v: pointer to atomic64_t                  << 
3615  *                                            << 
3616  * Atomically updates @v to (@v & @i) with re << 
3617  *                                            << 
3618  * Safe to use in noinstr code; prefer atomic << 
3619  *                                            << 
3620  * Return: The original value of @v.          << 
3621  */                                           << 
3622 static __always_inline s64                       2018 static __always_inline s64
3623 raw_atomic64_fetch_and_release(s64 i, atomic6 !! 2019 arch_atomic64_fetch_and_release(s64 i, atomic64_t *v)
3624 {                                                2020 {
3625 #if defined(arch_atomic64_fetch_and_release)  << 
3626         return arch_atomic64_fetch_and_releas << 
3627 #elif defined(arch_atomic64_fetch_and_relaxed << 
3628         __atomic_release_fence();                2021         __atomic_release_fence();
3629         return arch_atomic64_fetch_and_relaxe    2022         return arch_atomic64_fetch_and_relaxed(i, v);
3630 #elif defined(arch_atomic64_fetch_and)        << 
3631         return arch_atomic64_fetch_and(i, v); << 
3632 #else                                         << 
3633 #error "Unable to define raw_atomic64_fetch_a << 
3634 #endif                                        << 
3635 }                                                2023 }
                                                   >> 2024 #define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
                                                   >> 2025 #endif
3636                                                  2026 
3637 /**                                           !! 2027 #ifndef arch_atomic64_fetch_and
3638  * raw_atomic64_fetch_and_relaxed() - atomic  << 
3639  * @i: s64 value                              << 
3640  * @v: pointer to atomic64_t                  << 
3641  *                                            << 
3642  * Atomically updates @v to (@v & @i) with re << 
3643  *                                            << 
3644  * Safe to use in noinstr code; prefer atomic << 
3645  *                                            << 
3646  * Return: The original value of @v.          << 
3647  */                                           << 
3648 static __always_inline s64                       2028 static __always_inline s64
3649 raw_atomic64_fetch_and_relaxed(s64 i, atomic6 !! 2029 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
3650 {                                                2030 {
3651 #if defined(arch_atomic64_fetch_and_relaxed)  !! 2031         s64 ret;
3652         return arch_atomic64_fetch_and_relaxe !! 2032         __atomic_pre_full_fence();
3653 #elif defined(arch_atomic64_fetch_and)        !! 2033         ret = arch_atomic64_fetch_and_relaxed(i, v);
3654         return arch_atomic64_fetch_and(i, v); !! 2034         __atomic_post_full_fence();
3655 #else                                         !! 2035         return ret;
3656 #error "Unable to define raw_atomic64_fetch_a << 
3657 #endif                                        << 
3658 }                                                2036 }
                                                   >> 2037 #define arch_atomic64_fetch_and arch_atomic64_fetch_and
                                                   >> 2038 #endif
3659                                                  2039 
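/*
 * [Editor's illustrative sketch -- not part of the generated header.]
 * fetch_and() is commonly used as "clear these bits and tell me what was
 * set beforehand". The helper below is hypothetical; non-noinstr code
 * should prefer atomic64_fetch_and().
 */
static __always_inline bool example_clear_flags(atomic64_t *flags, s64 mask)
{
	/* True if any bit in @mask was set before the clear. */
	return (raw_atomic64_fetch_and(~mask, flags) & mask) != 0;
}
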
3660 /**                                           !! 2040 #endif /* arch_atomic64_fetch_and_relaxed */
3661  * raw_atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering !! 2041 
3662  * @i: s64 value                              !! 2042 #ifndef arch_atomic64_andnot
3663  * @v: pointer to atomic64_t                  << 
3664  *                                            << 
3665  * Atomically updates @v to (@v & ~@i) with relaxed ordering. <<
3666  *                                            << 
3667  * Safe to use in noinstr code; prefer atomic64_andnot() elsewhere. <<
3668  *                                            << 
3669  * Return: Nothing.                           << 
3670  */                                           << 
3671 static __always_inline void                      2043 static __always_inline void
3672 raw_atomic64_andnot(s64 i, atomic64_t *v)     !! 2044 arch_atomic64_andnot(s64 i, atomic64_t *v)
3673 {                                                2045 {
3674 #if defined(arch_atomic64_andnot)             !! 2046         arch_atomic64_and(~i, v);
3675         arch_atomic64_andnot(i, v);           << 
3676 #else                                         << 
3677         raw_atomic64_and(~i, v);              << 
3678 #endif                                        << 
3679 }                                                2047 }
                                                   >> 2048 #define arch_atomic64_andnot arch_atomic64_andnot
                                                   >> 2049 #endif
3680                                                  2050 
3681 /**                                           !! 2051 #ifndef arch_atomic64_fetch_andnot_relaxed
3682  * raw_atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering !! 2052 #ifdef arch_atomic64_fetch_andnot
3683  * @i: s64 value                              !! 2053 #define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot
3684  * @v: pointer to atomic64_t                  !! 2054 #define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot
3685  *                                            !! 2055 #define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot
3686  * Atomically updates @v to (@v & ~@i) with full ordering. !! 2056 #endif /* arch_atomic64_fetch_andnot */
3687  *                                            !! 2057 
3688  * Safe to use in noinstr code; prefer atomic64_fetch_andnot() elsewhere. !! 2058 #ifndef arch_atomic64_fetch_andnot
3689  *                                            << 
3690  * Return: The original value of @v.          << 
3691  */                                           << 
3692 static __always_inline s64                       2059 static __always_inline s64
3693 raw_atomic64_fetch_andnot(s64 i, atomic64_t * !! 2060 arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
3694 {                                                2061 {
3695 #if defined(arch_atomic64_fetch_andnot)       !! 2062         return arch_atomic64_fetch_and(~i, v);
3696         return arch_atomic64_fetch_andnot(i, v); << 
3697 #elif defined(arch_atomic64_fetch_andnot_relaxed) << 
3698         s64 ret;                              << 
3699         __atomic_pre_full_fence();            << 
3700         ret = arch_atomic64_fetch_andnot_relaxed(i, v); << 
3701         __atomic_post_full_fence();           << 
3702         return ret;                           << 
3703 #else                                         << 
3704         return raw_atomic64_fetch_and(~i, v); << 
3705 #endif                                        << 
3706 }                                                2063 }
                                                   >> 2064 #define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
                                                   >> 2065 #endif
3707                                                  2066 
3708 /**                                           !! 2067 #ifndef arch_atomic64_fetch_andnot_acquire
3709  * raw_atomic64_fetch_andnot_acquire() - atom << 
3710  * @i: s64 value                              << 
3711  * @v: pointer to atomic64_t                  << 
3712  *                                            << 
3713  * Atomically updates @v to (@v & ~@i) with a << 
3714  *                                            << 
3715  * Safe to use in noinstr code; prefer atomic << 
3716  *                                            << 
3717  * Return: The original value of @v.          << 
3718  */                                           << 
3719 static __always_inline s64                       2068 static __always_inline s64
3720 raw_atomic64_fetch_andnot_acquire(s64 i, atom !! 2069 arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
3721 {                                                2070 {
3722 #if defined(arch_atomic64_fetch_andnot_acquir !! 2071         return arch_atomic64_fetch_and_acquire(~i, v);
3723         return arch_atomic64_fetch_andnot_acq << 
3724 #elif defined(arch_atomic64_fetch_andnot_rela << 
3725         s64 ret = arch_atomic64_fetch_andnot_ << 
3726         __atomic_acquire_fence();             << 
3727         return ret;                           << 
3728 #elif defined(arch_atomic64_fetch_andnot)     << 
3729         return arch_atomic64_fetch_andnot(i,  << 
3730 #else                                         << 
3731         return raw_atomic64_fetch_and_acquire << 
3732 #endif                                        << 
3733 }                                                2072 }
                                                   >> 2073 #define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
                                                   >> 2074 #endif
3734                                                  2075 
3735 /**                                           !! 2076 #ifndef arch_atomic64_fetch_andnot_release
3736  * raw_atomic64_fetch_andnot_release() - atom << 
3737  * @i: s64 value                              << 
3738  * @v: pointer to atomic64_t                  << 
3739  *                                            << 
3740  * Atomically updates @v to (@v & ~@i) with r << 
3741  *                                            << 
3742  * Safe to use in noinstr code; prefer atomic << 
3743  *                                            << 
3744  * Return: The original value of @v.          << 
3745  */                                           << 
3746 static __always_inline s64                       2077 static __always_inline s64
3747 raw_atomic64_fetch_andnot_release(s64 i, atom !! 2078 arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
3748 {                                                2079 {
3749 #if defined(arch_atomic64_fetch_andnot_releas !! 2080         return arch_atomic64_fetch_and_release(~i, v);
3750         return arch_atomic64_fetch_andnot_rel << 
3751 #elif defined(arch_atomic64_fetch_andnot_rela << 
3752         __atomic_release_fence();             << 
3753         return arch_atomic64_fetch_andnot_rel << 
3754 #elif defined(arch_atomic64_fetch_andnot)     << 
3755         return arch_atomic64_fetch_andnot(i,  << 
3756 #else                                         << 
3757         return raw_atomic64_fetch_and_release << 
3758 #endif                                        << 
3759 }                                                2081 }
                                                   >> 2082 #define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
                                                   >> 2083 #endif
3760                                                  2084 
3761 /**                                           !! 2085 #ifndef arch_atomic64_fetch_andnot_relaxed
3762  * raw_atomic64_fetch_andnot_relaxed() - atom << 
3763  * @i: s64 value                              << 
3764  * @v: pointer to atomic64_t                  << 
3765  *                                            << 
3766  * Atomically updates @v to (@v & ~@i) with r << 
3767  *                                            << 
3768  * Safe to use in noinstr code; prefer atomic << 
3769  *                                            << 
3770  * Return: The original value of @v.          << 
3771  */                                           << 
3772 static __always_inline s64                       2086 static __always_inline s64
3773 raw_atomic64_fetch_andnot_relaxed(s64 i, atom !! 2087 arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
3774 {                                                2088 {
3775 #if defined(arch_atomic64_fetch_andnot_relaxe !! 2089         return arch_atomic64_fetch_and_relaxed(~i, v);
3776         return arch_atomic64_fetch_andnot_rel !! 2090 }
3777 #elif defined(arch_atomic64_fetch_andnot)     !! 2091 #define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
3778         return arch_atomic64_fetch_andnot(i,  << 
3779 #else                                         << 
3780         return raw_atomic64_fetch_and_relaxed << 
3781 #endif                                           2092 #endif
                                                   >> 2093 
                                                   >> 2094 #else /* arch_atomic64_fetch_andnot_relaxed */
                                                   >> 2095 
                                                   >> 2096 #ifndef arch_atomic64_fetch_andnot_acquire
                                                   >> 2097 static __always_inline s64
                                                   >> 2098 arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
                                                   >> 2099 {
                                                   >> 2100         s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
                                                   >> 2101         __atomic_acquire_fence();
                                                   >> 2102         return ret;
3782 }                                                2103 }
                                                   >> 2104 #define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
                                                   >> 2105 #endif
3783                                                  2106 
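/*
 * [Editor's illustrative sketch -- not part of the generated header.]
 * fetch_andnot() clears the bits in @i without the caller having to invert
 * the mask by hand, which is why the fallbacks above reduce to
 * fetch_and(~i, v). The helper below is hypothetical.
 */
static __always_inline bool example_test_and_clear_flag(atomic64_t *flags, s64 flag)
{
	/* True if @flag was set before this call cleared it. */
	return (raw_atomic64_fetch_andnot(flag, flags) & flag) != 0;
}
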
3784 /**                                           !! 2107 #ifndef arch_atomic64_fetch_andnot_release
3785  * raw_atomic64_or() - atomic bitwise OR with relaxed ordering !! 2108 static __always_inline s64
3786  * @i: s64 value                              !! 2109 arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
3787  * @v: pointer to atomic64_t                  << 
3788  *                                            << 
3789  * Atomically updates @v to (@v | @i) with relaxed ordering. <<
3790  *                                            << 
3791  * Safe to use in noinstr code; prefer atomic64_or() elsewhere. <<
3792  *                                            << 
3793  * Return: Nothing.                           << 
3794  */                                           << 
3795 static __always_inline void                   << 
3796 raw_atomic64_or(s64 i, atomic64_t *v)         << 
3797 {                                                2110 {
3798         arch_atomic64_or(i, v);               !! 2111         __atomic_release_fence();
                                                   >> 2112         return arch_atomic64_fetch_andnot_relaxed(i, v);
3799 }                                                2113 }
                                                   >> 2114 #define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
                                                   >> 2115 #endif
3800                                                  2116 
3801 /**                                           !! 2117 #ifndef arch_atomic64_fetch_andnot
3802  * raw_atomic64_fetch_or() - atomic bitwise OR with full ordering <<
3803  * @i: s64 value                              << 
3804  * @v: pointer to atomic64_t                  << 
3805  *                                            << 
3806  * Atomically updates @v to (@v | @i) with full ordering. <<
3807  *                                            << 
3808  * Safe to use in noinstr code; prefer atomic64_fetch_or() elsewhere. <<
3809  *                                            << 
3810  * Return: The original value of @v.          << 
3811  */                                           << 
3812 static __always_inline s64                       2118 static __always_inline s64
3813 raw_atomic64_fetch_or(s64 i, atomic64_t *v)   !! 2119 arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
3814 {                                                2120 {
3815 #if defined(arch_atomic64_fetch_or)           << 
3816         return arch_atomic64_fetch_or(i, v);  << 
3817 #elif defined(arch_atomic64_fetch_or_relaxed) << 
3818         s64 ret;                                 2121         s64 ret;
3819         __atomic_pre_full_fence();               2122         __atomic_pre_full_fence();
3820         ret = arch_atomic64_fetch_or_relaxed(i, v); !! 2123         ret = arch_atomic64_fetch_andnot_relaxed(i, v);
3821         __atomic_post_full_fence();              2124         __atomic_post_full_fence();
3822         return ret;                              2125         return ret;
3823 #else                                         << 
3824 #error "Unable to define raw_atomic64_fetch_or" << 
3825 #endif                                        << 
3826 }                                                2126 }
                                                   >> 2127 #define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
                                                   >> 2128 #endif
3827                                                  2129 
3828 /**                                           !! 2130 #endif /* arch_atomic64_fetch_andnot_relaxed */
3829  * raw_atomic64_fetch_or_acquire() - atomic b !! 2131 
3830  * @i: s64 value                              !! 2132 #ifndef arch_atomic64_fetch_or_relaxed
3831  * @v: pointer to atomic64_t                  !! 2133 #define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
3832  *                                            !! 2134 #define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
3833  * Atomically updates @v to (@v | @i) with ac !! 2135 #define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or
3834  *                                            !! 2136 #else /* arch_atomic64_fetch_or_relaxed */
3835  * Safe to use in noinstr code; prefer atomic !! 2137 
3836  *                                            !! 2138 #ifndef arch_atomic64_fetch_or_acquire
3837  * Return: The original value of @v.          << 
3838  */                                           << 
3839 static __always_inline s64                       2139 static __always_inline s64
3840 raw_atomic64_fetch_or_acquire(s64 i, atomic64 !! 2140 arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
3841 {                                                2141 {
3842 #if defined(arch_atomic64_fetch_or_acquire)   << 
3843         return arch_atomic64_fetch_or_acquire << 
3844 #elif defined(arch_atomic64_fetch_or_relaxed) << 
3845         s64 ret = arch_atomic64_fetch_or_rela    2142         s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
3846         __atomic_acquire_fence();                2143         __atomic_acquire_fence();
3847         return ret;                              2144         return ret;
3848 #elif defined(arch_atomic64_fetch_or)         << 
3849         return arch_atomic64_fetch_or(i, v);  << 
3850 #else                                         << 
3851 #error "Unable to define raw_atomic64_fetch_o << 
3852 #endif                                        << 
3853 }                                                2145 }
                                                   >> 2146 #define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire
                                                   >> 2147 #endif
3854                                                  2148 
3855 /**                                           !! 2149 #ifndef arch_atomic64_fetch_or_release
3856  * raw_atomic64_fetch_or_release() - atomic b << 
3857  * @i: s64 value                              << 
3858  * @v: pointer to atomic64_t                  << 
3859  *                                            << 
3860  * Atomically updates @v to (@v | @i) with re << 
3861  *                                            << 
3862  * Safe to use in noinstr code; prefer atomic << 
3863  *                                            << 
3864  * Return: The original value of @v.          << 
3865  */                                           << 
3866 static __always_inline s64                       2150 static __always_inline s64
3867 raw_atomic64_fetch_or_release(s64 i, atomic64 !! 2151 arch_atomic64_fetch_or_release(s64 i, atomic64_t *v)
3868 {                                                2152 {
3869 #if defined(arch_atomic64_fetch_or_release)   << 
3870         return arch_atomic64_fetch_or_release << 
3871 #elif defined(arch_atomic64_fetch_or_relaxed) << 
3872         __atomic_release_fence();                2153         __atomic_release_fence();
3873         return arch_atomic64_fetch_or_relaxed    2154         return arch_atomic64_fetch_or_relaxed(i, v);
3874 #elif defined(arch_atomic64_fetch_or)         << 
3875         return arch_atomic64_fetch_or(i, v);  << 
3876 #else                                         << 
3877 #error "Unable to define raw_atomic64_fetch_o << 
3878 #endif                                        << 
3879 }                                                2155 }
3880                                               !! 2156 #define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release
3881 /**                                           << 
3882  * raw_atomic64_fetch_or_relaxed() - atomic b << 
3883  * @i: s64 value                              << 
3884  * @v: pointer to atomic64_t                  << 
3885  *                                            << 
3886  * Atomically updates @v to (@v | @i) with re << 
3887  *                                            << 
3888  * Safe to use in noinstr code; prefer atomic << 
3889  *                                            << 
3890  * Return: The original value of @v.          << 
3891  */                                           << 
3892 static __always_inline s64                    << 
3893 raw_atomic64_fetch_or_relaxed(s64 i, atomic64 << 
3894 {                                             << 
3895 #if defined(arch_atomic64_fetch_or_relaxed)   << 
3896         return arch_atomic64_fetch_or_relaxed << 
3897 #elif defined(arch_atomic64_fetch_or)         << 
3898         return arch_atomic64_fetch_or(i, v);  << 
3899 #else                                         << 
3900 #error "Unable to define raw_atomic64_fetch_o << 
3901 #endif                                           2157 #endif
3902 }                                             << 
3903                                               << 
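/*
 * [Editor's illustrative sketch -- not part of the generated header.]
 * fetch_or() sets bits while reporting the previous value, so it can be
 * used to make sure some action happens only for the first setter. The
 * helper below is hypothetical; non-noinstr code should prefer
 * atomic64_fetch_or().
 */
static __always_inline bool example_set_flag_once(atomic64_t *flags, s64 flag)
{
	/* True only for the caller that actually transitioned the bit 0 -> 1. */
	return (raw_atomic64_fetch_or(flag, flags) & flag) == 0;
}
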
3904 /**                                           << 
3905  * raw_atomic64_xor() - atomic bitwise XOR wi << 
3906  * @i: s64 value                              << 
3907  * @v: pointer to atomic64_t                  << 
3908  *                                            << 
3909  * Atomically updates @v to (@v ^ @i) with re << 
3910  *                                            << 
3911  * Safe to use in noinstr code; prefer atomic << 
3912  *                                            << 
3913  * Return: Nothing.                           << 
3914  */                                           << 
3915 static __always_inline void                   << 
3916 raw_atomic64_xor(s64 i, atomic64_t *v)        << 
3917 {                                             << 
3918         arch_atomic64_xor(i, v);              << 
3919 }                                             << 
3920                                                  2158 
3921 /**                                           !! 2159 #ifndef arch_atomic64_fetch_or
3922  * raw_atomic64_fetch_xor() - atomic bitwise XOR with full ordering <<
3923  * @i: s64 value                              << 
3924  * @v: pointer to atomic64_t                  << 
3925  *                                            << 
3926  * Atomically updates @v to (@v ^ @i) with full ordering. <<
3927  *                                            << 
3928  * Safe to use in noinstr code; prefer atomic64_fetch_xor() elsewhere. <<
3929  *                                            << 
3930  * Return: The original value of @v.          << 
3931  */                                           << 
3932 static __always_inline s64                       2160 static __always_inline s64
3933 raw_atomic64_fetch_xor(s64 i, atomic64_t *v)  !! 2161 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
3934 {                                                2162 {
3935 #if defined(arch_atomic64_fetch_xor)          << 
3936         return arch_atomic64_fetch_xor(i, v); << 
3937 #elif defined(arch_atomic64_fetch_xor_relaxed << 
3938         s64 ret;                                 2163         s64 ret;
3939         __atomic_pre_full_fence();               2164         __atomic_pre_full_fence();
3940         ret = arch_atomic64_fetch_xor_relaxed !! 2165         ret = arch_atomic64_fetch_or_relaxed(i, v);
3941         __atomic_post_full_fence();              2166         __atomic_post_full_fence();
3942         return ret;                              2167         return ret;
3943 #else                                         << 
3944 #error "Unable to define raw_atomic64_fetch_x << 
3945 #endif                                        << 
3946 }                                                2168 }
                                                   >> 2169 #define arch_atomic64_fetch_or arch_atomic64_fetch_or
                                                   >> 2170 #endif
3947                                                  2171 
3948 /**                                           !! 2172 #endif /* arch_atomic64_fetch_or_relaxed */
3949  * raw_atomic64_fetch_xor_acquire() - atomic  !! 2173 
3950  * @i: s64 value                              !! 2174 #ifndef arch_atomic64_fetch_xor_relaxed
3951  * @v: pointer to atomic64_t                  !! 2175 #define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
3952  *                                            !! 2176 #define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
3953  * Atomically updates @v to (@v ^ @i) with ac !! 2177 #define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor
3954  *                                            !! 2178 #else /* arch_atomic64_fetch_xor_relaxed */
3955  * Safe to use in noinstr code; prefer atomic !! 2179 
3956  *                                            !! 2180 #ifndef arch_atomic64_fetch_xor_acquire
3957  * Return: The original value of @v.          << 
3958  */                                           << 
3959 static __always_inline s64                       2181 static __always_inline s64
3960 raw_atomic64_fetch_xor_acquire(s64 i, atomic6 !! 2182 arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
3961 {                                                2183 {
3962 #if defined(arch_atomic64_fetch_xor_acquire)  << 
3963         return arch_atomic64_fetch_xor_acquir << 
3964 #elif defined(arch_atomic64_fetch_xor_relaxed << 
3965         s64 ret = arch_atomic64_fetch_xor_rel    2184         s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
3966         __atomic_acquire_fence();                2185         __atomic_acquire_fence();
3967         return ret;                              2186         return ret;
3968 #elif defined(arch_atomic64_fetch_xor)        << 
3969         return arch_atomic64_fetch_xor(i, v); << 
3970 #else                                         << 
3971 #error "Unable to define raw_atomic64_fetch_x << 
3972 #endif                                        << 
3973 }                                                2187 }
                                                   >> 2188 #define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire
                                                   >> 2189 #endif
3974                                                  2190 
3975 /**                                           !! 2191 #ifndef arch_atomic64_fetch_xor_release
3976  * raw_atomic64_fetch_xor_release() - atomic  << 
3977  * @i: s64 value                              << 
3978  * @v: pointer to atomic64_t                  << 
3979  *                                            << 
3980  * Atomically updates @v to (@v ^ @i) with re << 
3981  *                                            << 
3982  * Safe to use in noinstr code; prefer atomic << 
3983  *                                            << 
3984  * Return: The original value of @v.          << 
3985  */                                           << 
3986 static __always_inline s64                       2192 static __always_inline s64
3987 raw_atomic64_fetch_xor_release(s64 i, atomic6 !! 2193 arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
3988 {                                                2194 {
3989 #if defined(arch_atomic64_fetch_xor_release)  << 
3990         return arch_atomic64_fetch_xor_releas << 
3991 #elif defined(arch_atomic64_fetch_xor_relaxed << 
3992         __atomic_release_fence();                2195         __atomic_release_fence();
3993         return arch_atomic64_fetch_xor_relaxe    2196         return arch_atomic64_fetch_xor_relaxed(i, v);
3994 #elif defined(arch_atomic64_fetch_xor)        << 
3995         return arch_atomic64_fetch_xor(i, v); << 
3996 #else                                         << 
3997 #error "Unable to define raw_atomic64_fetch_x << 
3998 #endif                                        << 
3999 }                                                2197 }
4000                                               !! 2198 #define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
4001 /**                                           << 
4002  * raw_atomic64_fetch_xor_relaxed() - atomic  << 
4003  * @i: s64 value                              << 
4004  * @v: pointer to atomic64_t                  << 
4005  *                                            << 
4006  * Atomically updates @v to (@v ^ @i) with re << 
4007  *                                            << 
4008  * Safe to use in noinstr code; prefer atomic << 
4009  *                                            << 
4010  * Return: The original value of @v.          << 
4011  */                                           << 
4012 static __always_inline s64                    << 
4013 raw_atomic64_fetch_xor_relaxed(s64 i, atomic6 << 
4014 {                                             << 
4015 #if defined(arch_atomic64_fetch_xor_relaxed)  << 
4016         return arch_atomic64_fetch_xor_relaxe << 
4017 #elif defined(arch_atomic64_fetch_xor)        << 
4018         return arch_atomic64_fetch_xor(i, v); << 
4019 #else                                         << 
4020 #error "Unable to define raw_atomic64_fetch_x << 
4021 #endif                                           2199 #endif
4022 }                                             << 
4023                                                  2200 
4024 /**                                           !! 2201 #ifndef arch_atomic64_fetch_xor
4025  * raw_atomic64_xchg() - atomic exchange with full ordering <<
4026  * @v: pointer to atomic64_t                  << 
4027  * @new: s64 value to assign                  << 
4028  *                                            << 
4029  * Atomically updates @v to @new with full ordering. <<
4030  *                                            << 
4031  * Safe to use in noinstr code; prefer atomic64_xchg() elsewhere. <<
4032  *                                            << 
4033  * Return: The original value of @v.          << 
4034  */                                           << 
4035 static __always_inline s64                       2202 static __always_inline s64
4036 raw_atomic64_xchg(atomic64_t *v, s64 new)     !! 2203 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
4037 {                                                2204 {
4038 #if defined(arch_atomic64_xchg)               << 
4039         return arch_atomic64_xchg(v, new);    << 
4040 #elif defined(arch_atomic64_xchg_relaxed)     << 
4041         s64 ret;                                 2205         s64 ret;
4042         __atomic_pre_full_fence();               2206         __atomic_pre_full_fence();
4043         ret = arch_atomic64_xchg_relaxed(v, new); !! 2207         ret = arch_atomic64_fetch_xor_relaxed(i, v);
4044         __atomic_post_full_fence();              2208         __atomic_post_full_fence();
4045         return ret;                              2209         return ret;
4046 #else                                         << 
4047         return raw_xchg(&v->counter, new);    << 
4048 #endif                                        << 
4049 }                                                2210 }
                                                   >> 2211 #define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
                                                   >> 2212 #endif
4050                                                  2213 
4051 /**                                           !! 2214 #endif /* arch_atomic64_fetch_xor_relaxed */
4052  * raw_atomic64_xchg_acquire() - atomic excha !! 2215 
4053  * @v: pointer to atomic64_t                  !! 2216 #ifndef arch_atomic64_xchg_relaxed
4054  * @new: s64 value to assign                  !! 2217 #define arch_atomic64_xchg_acquire arch_atomic64_xchg
4055  *                                            !! 2218 #define arch_atomic64_xchg_release arch_atomic64_xchg
4056  * Atomically updates @v to @new with acquire !! 2219 #define arch_atomic64_xchg_relaxed arch_atomic64_xchg
4057  *                                            !! 2220 #else /* arch_atomic64_xchg_relaxed */
4058  * Safe to use in noinstr code; prefer atomic !! 2221 
4059  *                                            !! 2222 #ifndef arch_atomic64_xchg_acquire
4060  * Return: The original value of @v.          << 
4061  */                                           << 
4062 static __always_inline s64                       2223 static __always_inline s64
4063 raw_atomic64_xchg_acquire(atomic64_t *v, s64  !! 2224 arch_atomic64_xchg_acquire(atomic64_t *v, s64 i)
4064 {                                                2225 {
4065 #if defined(arch_atomic64_xchg_acquire)       !! 2226         s64 ret = arch_atomic64_xchg_relaxed(v, i);
4066         return arch_atomic64_xchg_acquire(v,  << 
4067 #elif defined(arch_atomic64_xchg_relaxed)     << 
4068         s64 ret = arch_atomic64_xchg_relaxed( << 
4069         __atomic_acquire_fence();                2227         __atomic_acquire_fence();
4070         return ret;                              2228         return ret;
4071 #elif defined(arch_atomic64_xchg)             << 
4072         return arch_atomic64_xchg(v, new);    << 
4073 #else                                         << 
4074         return raw_xchg_acquire(&v->counter,  << 
4075 #endif                                        << 
4076 }                                                2229 }
                                                   >> 2230 #define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire
                                                   >> 2231 #endif
4077                                                  2232 
4078 /**                                           !! 2233 #ifndef arch_atomic64_xchg_release
4079  * raw_atomic64_xchg_release() - atomic excha << 
4080  * @v: pointer to atomic64_t                  << 
4081  * @new: s64 value to assign                  << 
4082  *                                            << 
4083  * Atomically updates @v to @new with release << 
4084  *                                            << 
4085  * Safe to use in noinstr code; prefer atomic << 
4086  *                                            << 
4087  * Return: The original value of @v.          << 
4088  */                                           << 
4089 static __always_inline s64                       2234 static __always_inline s64
4090 raw_atomic64_xchg_release(atomic64_t *v, s64  !! 2235 arch_atomic64_xchg_release(atomic64_t *v, s64 i)
4091 {                                                2236 {
4092 #if defined(arch_atomic64_xchg_release)       << 
4093         return arch_atomic64_xchg_release(v,  << 
4094 #elif defined(arch_atomic64_xchg_relaxed)     << 
4095         __atomic_release_fence();                2237         __atomic_release_fence();
4096         return arch_atomic64_xchg_relaxed(v,  !! 2238         return arch_atomic64_xchg_relaxed(v, i);
4097 #elif defined(arch_atomic64_xchg)             << 
4098         return arch_atomic64_xchg(v, new);    << 
4099 #else                                         << 
4100         return raw_xchg_release(&v->counter,  << 
4101 #endif                                        << 
4102 }                                                2239 }
4103                                               !! 2240 #define arch_atomic64_xchg_release arch_atomic64_xchg_release
4104 /**                                           << 
4105  * raw_atomic64_xchg_relaxed() - atomic excha << 
4106  * @v: pointer to atomic64_t                  << 
4107  * @new: s64 value to assign                  << 
4108  *                                            << 
4109  * Atomically updates @v to @new with relaxed << 
4110  *                                            << 
4111  * Safe to use in noinstr code; prefer atomic << 
4112  *                                            << 
4113  * Return: The original value of @v.          << 
4114  */                                           << 
4115 static __always_inline s64                    << 
4116 raw_atomic64_xchg_relaxed(atomic64_t *v, s64  << 
4117 {                                             << 
4118 #if defined(arch_atomic64_xchg_relaxed)       << 
4119         return arch_atomic64_xchg_relaxed(v,  << 
4120 #elif defined(arch_atomic64_xchg)             << 
4121         return arch_atomic64_xchg(v, new);    << 
4122 #else                                         << 
4123         return raw_xchg_relaxed(&v->counter,  << 
4124 #endif                                           2241 #endif
4125 }                                             << 
4126                                                  2242 
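/*
 * [Editor's illustrative sketch -- not part of the generated header.]
 * xchg() unconditionally installs a new value and hands back the old one,
 * which suits "consume whatever has accumulated" patterns. The helper
 * below is hypothetical; non-noinstr code should prefer atomic64_xchg().
 */
static __always_inline s64 example_take_pending(atomic64_t *pending)
{
	/* Atomically grab the accumulated value and reset it to zero. */
	return raw_atomic64_xchg(pending, 0);
}
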
4127 /**                                           !! 2243 #ifndef arch_atomic64_xchg
4128  * raw_atomic64_cmpxchg() - atomic compare and exchange with full ordering <<
4129  * @v: pointer to atomic64_t                  << 
4130  * @old: s64 value to compare with            << 
4131  * @new: s64 value to assign                  << 
4132  *                                            << 
4133  * If (@v == @old), atomically updates @v to @new with full ordering. <<
4134  * Otherwise, @v is not modified and relaxed ordering is provided. <<
4135  *                                            << 
4136  * Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere. <<
4137  *                                            << 
4138  * Return: The original value of @v.          << 
4139  */                                           << 
4140 static __always_inline s64                       2244 static __always_inline s64
4141 raw_atomic64_cmpxchg(atomic64_t *v, s64 old,  !! 2245 arch_atomic64_xchg(atomic64_t *v, s64 i)
4142 {                                                2246 {
4143 #if defined(arch_atomic64_cmpxchg)            << 
4144         return arch_atomic64_cmpxchg(v, old, new); << 
4145 #elif defined(arch_atomic64_cmpxchg_relaxed)  << 
4146         s64 ret;                                 2247         s64 ret;
4147         __atomic_pre_full_fence();               2248         __atomic_pre_full_fence();
4148         ret = arch_atomic64_cmpxchg_relaxed(v, old, new); !! 2249         ret = arch_atomic64_xchg_relaxed(v, i);
4149         __atomic_post_full_fence();              2250         __atomic_post_full_fence();
4150         return ret;                              2251         return ret;
4151 #else                                         << 
4152         return raw_cmpxchg(&v->counter, old, new); << 
4153 #endif                                        << 
4154 }                                                2252 }
                                                   >> 2253 #define arch_atomic64_xchg arch_atomic64_xchg
                                                   >> 2254 #endif
4155                                                  2255 
4156 /**                                           !! 2256 #endif /* arch_atomic64_xchg_relaxed */
4157  * raw_atomic64_cmpxchg_acquire() - atomic co !! 2257 
4158  * @v: pointer to atomic64_t                  !! 2258 #ifndef arch_atomic64_cmpxchg_relaxed
4159  * @old: s64 value to compare with            !! 2259 #define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg
4160  * @new: s64 value to assign                  !! 2260 #define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg
4161  *                                            !! 2261 #define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg
4162  * If (@v == @old), atomically updates @v to  !! 2262 #else /* arch_atomic64_cmpxchg_relaxed */
4163  * Otherwise, @v is not modified and relaxed  !! 2263 
4164  *                                            !! 2264 #ifndef arch_atomic64_cmpxchg_acquire
4165  * Safe to use in noinstr code; prefer atomic << 
4166  *                                            << 
4167  * Return: The original value of @v.          << 
4168  */                                           << 
4169 static __always_inline s64                       2265 static __always_inline s64
4170 raw_atomic64_cmpxchg_acquire(atomic64_t *v, s !! 2266 arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
4171 {                                                2267 {
4172 #if defined(arch_atomic64_cmpxchg_acquire)    << 
4173         return arch_atomic64_cmpxchg_acquire( << 
4174 #elif defined(arch_atomic64_cmpxchg_relaxed)  << 
4175         s64 ret = arch_atomic64_cmpxchg_relax    2268         s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
4176         __atomic_acquire_fence();                2269         __atomic_acquire_fence();
4177         return ret;                              2270         return ret;
4178 #elif defined(arch_atomic64_cmpxchg)          << 
4179         return arch_atomic64_cmpxchg(v, old,  << 
4180 #else                                         << 
4181         return raw_cmpxchg_acquire(&v->counte << 
4182 #endif                                        << 
4183 }                                                2271 }
                                                   >> 2272 #define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire
                                                   >> 2273 #endif
4184                                                  2274 
4185 /**                                           !! 2275 #ifndef arch_atomic64_cmpxchg_release
4186  * raw_atomic64_cmpxchg_release() - atomic co << 
4187  * @v: pointer to atomic64_t                  << 
4188  * @old: s64 value to compare with            << 
4189  * @new: s64 value to assign                  << 
4190  *                                            << 
4191  * If (@v == @old), atomically updates @v to  << 
4192  * Otherwise, @v is not modified and relaxed  << 
4193  *                                            << 
4194  * Safe to use in noinstr code; prefer atomic << 
4195  *                                            << 
4196  * Return: The original value of @v.          << 
4197  */                                           << 
4198 static __always_inline s64                       2276 static __always_inline s64
4199 raw_atomic64_cmpxchg_release(atomic64_t *v, s !! 2277 arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
4200 {                                                2278 {
4201 #if defined(arch_atomic64_cmpxchg_release)    << 
4202         return arch_atomic64_cmpxchg_release( << 
4203 #elif defined(arch_atomic64_cmpxchg_relaxed)  << 
4204         __atomic_release_fence();                2279         __atomic_release_fence();
4205         return arch_atomic64_cmpxchg_relaxed(    2280         return arch_atomic64_cmpxchg_relaxed(v, old, new);
4206 #elif defined(arch_atomic64_cmpxchg)          << 
4207         return arch_atomic64_cmpxchg(v, old,  << 
4208 #else                                         << 
4209         return raw_cmpxchg_release(&v->counte << 
4210 #endif                                        << 
4211 }                                                2281 }
                                                   >> 2282 #define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release
                                                   >> 2283 #endif
4212                                                  2284 
4213 /**                                           !! 2285 #ifndef arch_atomic64_cmpxchg
4214  * raw_atomic64_cmpxchg_relaxed() - atomic co << 
4215  * @v: pointer to atomic64_t                  << 
4216  * @old: s64 value to compare with            << 
4217  * @new: s64 value to assign                  << 
4218  *                                            << 
4219  * If (@v == @old), atomically updates @v to  << 
4220  * Otherwise, @v is not modified and relaxed  << 
4221  *                                            << 
4222  * Safe to use in noinstr code; prefer atomic << 
4223  *                                            << 
4224  * Return: The original value of @v.          << 
4225  */                                           << 
4226 static __always_inline s64                       2286 static __always_inline s64
4227 raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s !! 2287 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
4228 {                                                2288 {
4229 #if defined(arch_atomic64_cmpxchg_relaxed)    !! 2289         s64 ret;
4230         return arch_atomic64_cmpxchg_relaxed( !! 2290         __atomic_pre_full_fence();
4231 #elif defined(arch_atomic64_cmpxchg)          !! 2291         ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
4232         return arch_atomic64_cmpxchg(v, old,  !! 2292         __atomic_post_full_fence();
4233 #else                                         !! 2293         return ret;
4234         return raw_cmpxchg_relaxed(&v->counte << 
4235 #endif                                        << 
4236 }                                                2294 }
                                                   >> 2295 #define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
                                                   >> 2296 #endif
4237                                                  2297 
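/*
 * [Editor's illustrative sketch -- not part of the generated header.]
 * The classic cmpxchg() retry loop, here an "increment unless zero". Names
 * are hypothetical. The try_cmpxchg() form defined below is usually
 * preferred, since it hands back the observed value instead of requiring
 * the caller to compare and reload by hand.
 */
static __always_inline bool example_inc_not_zero(atomic64_t *v)
{
	s64 old = raw_atomic64_read(v);

	while (old != 0) {
		s64 seen = raw_atomic64_cmpxchg(v, old, old + 1);

		if (seen == old)
			return true;	/* we installed old + 1 */
		old = seen;		/* lost the race; retry with the value we saw */
	}
	return false;			/* counter hit zero; do not resurrect it */
}
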
4238 /**                                           !! 2298 #endif /* arch_atomic64_cmpxchg_relaxed */
4239  * raw_atomic64_try_cmpxchg() - atomic compare and exchange with full ordering <<
4240  * @v: pointer to atomic64_t                  << 
4241  * @old: pointer to s64 value to compare with << 
4242  * @new: s64 value to assign                  << 
4243  *                                            << 
4244  * If (@v == @old), atomically updates @v to @new with full ordering. <<
4245  * Otherwise, @v is not modified, @old is updated to the current value of @v, <<
4246  * and relaxed ordering is provided.          << 
4247  *                                            << 
4248  * Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere. <<
4249  *                                            << 
4250  * Return: @true if the exchange occurred, @false otherwise. <<
4251  */                                           << 
4252 static __always_inline bool                      2308 static __always_inline bool
4253 raw_atomic64_try_cmpxchg(atomic64_t *v, s64 * !! 2309 arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
4254 {                                                2310 {
4255 #if defined(arch_atomic64_try_cmpxchg)        << 
4256         return arch_atomic64_try_cmpxchg(v, old, new); << 
4257 #elif defined(arch_atomic64_try_cmpxchg_relaxed) << 
4258         bool ret;                             << 
4259         __atomic_pre_full_fence();            << 
4260         ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new); << 
4261         __atomic_post_full_fence();           << 
4262         return ret;                           << 
4263 #else                                         << 
4264         s64 r, o = *old;                         2311         s64 r, o = *old;
4265         r = raw_atomic64_cmpxchg(v, o, new);  !! 2312         r = arch_atomic64_cmpxchg(v, o, new);
4266         if (unlikely(r != o))                    2313         if (unlikely(r != o))
4267                 *old = r;                        2314                 *old = r;
4268         return likely(r == o);                   2315         return likely(r == o);
4269 #endif                                        << 
4270 }                                                2316 }
                                                   >> 2317 #define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
                                                   >> 2318 #endif
4271                                                  2319 
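/*
 * [Editor's illustrative sketch -- not part of the generated header.]
 * The try_cmpxchg() idiom: on failure @old is updated to the value actually
 * found in @v, so the retry loop needs no explicit re-read. The helper below
 * (atomically raising @v to at least @new) is hypothetical.
 */
static __always_inline void example_set_max(atomic64_t *v, s64 new)
{
	s64 old = raw_atomic64_read(v);

	do {
		if (old >= new)
			return;		/* current value already large enough */
	} while (!raw_atomic64_try_cmpxchg(v, &old, new));
}
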
4272 /**                                           !! 2320 #ifndef arch_atomic64_try_cmpxchg_acquire
4273  * raw_atomic64_try_cmpxchg_acquire() - atomi << 
4274  * @v: pointer to atomic64_t                  << 
4275  * @old: pointer to s64 value to compare with << 
4276  * @new: s64 value to assign                  << 
4277  *                                            << 
4278  * If (@v == @old), atomically updates @v to  << 
4279  * Otherwise, @v is not modified, @old is upd << 
4280  * and relaxed ordering is provided.          << 
4281  *                                            << 
4282  * Safe to use in noinstr code; prefer atomic << 
4283  *                                            << 
4284  * Return: @true if the exchange occurred, @false otherwise. <<
4285  */                                           << 
4286 static __always_inline bool                      2321 static __always_inline bool
4287 raw_atomic64_try_cmpxchg_acquire(atomic64_t * !! 2322 arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
4288 {                                                2323 {
4289 #if defined(arch_atomic64_try_cmpxchg_acquire << 
4290         return arch_atomic64_try_cmpxchg_acqu << 
4291 #elif defined(arch_atomic64_try_cmpxchg_relax << 
4292         bool ret = arch_atomic64_try_cmpxchg_ << 
4293         __atomic_acquire_fence();             << 
4294         return ret;                           << 
4295 #elif defined(arch_atomic64_try_cmpxchg)      << 
4296         return arch_atomic64_try_cmpxchg(v, o << 
4297 #else                                         << 
4298         s64 r, o = *old;                         2324         s64 r, o = *old;
4299         r = raw_atomic64_cmpxchg_acquire(v, o !! 2325         r = arch_atomic64_cmpxchg_acquire(v, o, new);
4300         if (unlikely(r != o))                    2326         if (unlikely(r != o))
4301                 *old = r;                        2327                 *old = r;
4302         return likely(r == o);                   2328         return likely(r == o);
4303 #endif                                        << 
4304 }                                                2329 }
                                                   >> 2330 #define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
                                                   >> 2331 #endif
4305                                                  2332 
4306 /**                                           !! 2333 #ifndef arch_atomic64_try_cmpxchg_release
4307  * raw_atomic64_try_cmpxchg_release() - atomi << 
4308  * @v: pointer to atomic64_t                  << 
4309  * @old: pointer to s64 value to compare with << 
4310  * @new: s64 value to assign                  << 
4311  *                                            << 
4312  * If (@v == @old), atomically updates @v to  << 
4313  * Otherwise, @v is not modified, @old is upd << 
4314  * and relaxed ordering is provided.          << 
4315  *                                            << 
4316  * Safe to use in noinstr code; prefer atomic << 
4317  *                                            << 
4318  * Return: @true if the exchange occurred, @f << 
4319  */                                           << 
4320 static __always_inline bool                      2334 static __always_inline bool
4321 raw_atomic64_try_cmpxchg_release(atomic64_t * !! 2335 arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
4322 {                                                2336 {
4323 #if defined(arch_atomic64_try_cmpxchg_release << 
4324         return arch_atomic64_try_cmpxchg_rele << 
4325 #elif defined(arch_atomic64_try_cmpxchg_relax << 
4326         __atomic_release_fence();             << 
4327         return arch_atomic64_try_cmpxchg_rela << 
4328 #elif defined(arch_atomic64_try_cmpxchg)      << 
4329         return arch_atomic64_try_cmpxchg(v, o << 
4330 #else                                         << 
4331         s64 r, o = *old;                         2337         s64 r, o = *old;
4332         r = raw_atomic64_cmpxchg_release(v, o !! 2338         r = arch_atomic64_cmpxchg_release(v, o, new);
4333         if (unlikely(r != o))                    2339         if (unlikely(r != o))
4334                 *old = r;                        2340                 *old = r;
4335         return likely(r == o);                   2341         return likely(r == o);
4336 #endif                                        << 
4337 }                                                2342 }
                                                   >> 2343 #define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
                                                   >> 2344 #endif
4338                                                  2345 
4339 /**                                           !! 2346 #ifndef arch_atomic64_try_cmpxchg_relaxed
4340  * raw_atomic64_try_cmpxchg_relaxed() - atomi << 
4341  * @v: pointer to atomic64_t                  << 
4342  * @old: pointer to s64 value to compare with << 
4343  * @new: s64 value to assign                  << 
4344  *                                            << 
4345  * If (@v == @old), atomically updates @v to  << 
4346  * Otherwise, @v is not modified, @old is upd << 
4347  * and relaxed ordering is provided.          << 
4348  *                                            << 
4349  * Safe to use in noinstr code; prefer atomic << 
4350  *                                            << 
4351  * Return: @true if the exchange occurred, @f << 
4352  */                                           << 
4353 static __always_inline bool                      2347 static __always_inline bool
4354 raw_atomic64_try_cmpxchg_relaxed(atomic64_t * !! 2348 arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
4355 {                                                2349 {
4356 #if defined(arch_atomic64_try_cmpxchg_relaxed << 
4357         return arch_atomic64_try_cmpxchg_rela << 
4358 #elif defined(arch_atomic64_try_cmpxchg)      << 
4359         return arch_atomic64_try_cmpxchg(v, o << 
4360 #else                                         << 
4361         s64 r, o = *old;                         2350         s64 r, o = *old;
4362         r = raw_atomic64_cmpxchg_relaxed(v, o !! 2351         r = arch_atomic64_cmpxchg_relaxed(v, o, new);
4363         if (unlikely(r != o))                    2352         if (unlikely(r != o))
4364                 *old = r;                        2353                 *old = r;
4365         return likely(r == o);                   2354         return likely(r == o);
                                                   >> 2355 }
                                                   >> 2356 #define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed
                                                   >> 2357 #endif
                                                   >> 2358 
                                                   >> 2359 #else /* arch_atomic64_try_cmpxchg_relaxed */
                                                   >> 2360 
                                                   >> 2361 #ifndef arch_atomic64_try_cmpxchg_acquire
                                                   >> 2362 static __always_inline bool
                                                   >> 2363 arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
                                                   >> 2364 {
                                                   >> 2365         bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
                                                   >> 2366         __atomic_acquire_fence();
                                                   >> 2367         return ret;
                                                   >> 2368 }
                                                   >> 2369 #define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
                                                   >> 2370 #endif
                                                   >> 2371 
                                                   >> 2372 #ifndef arch_atomic64_try_cmpxchg_release
                                                   >> 2373 static __always_inline bool
                                                   >> 2374 arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
                                                   >> 2375 {
                                                   >> 2376         __atomic_release_fence();
                                                   >> 2377         return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
                                                   >> 2378 }
                                                   >> 2379 #define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
4366 #endif                                           2380 #endif
                                                   >> 2381 
                                                   >> 2382 #ifndef arch_atomic64_try_cmpxchg
                                                   >> 2383 static __always_inline bool
                                                   >> 2384 arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
                                                   >> 2385 {
                                                   >> 2386         bool ret;
                                                   >> 2387         __atomic_pre_full_fence();
                                                   >> 2388         ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
                                                   >> 2389         __atomic_post_full_fence();
                                                   >> 2390         return ret;
4367 }                                                2391 }
                                                   >> 2392 #define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
                                                   >> 2393 #endif
4368                                                  2394 
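The _acquire/_release/_relaxed variants differ only in the ordering provided around a successful exchange. A sketch of the usual pairing; the state values and helper names are made up for illustration:

#include <linux/atomic.h>

enum { SLOT_IDLE = 0, SLOT_BUSY = 1 };  /* illustrative state values */

/* Claim a slot: acquire ordering makes later reads of the slot's payload
 * happen after the successful transition to BUSY. */
static inline bool slot_try_claim(atomic64_t *state)
{
        s64 expected = SLOT_IDLE;

        return raw_atomic64_try_cmpxchg_acquire(state, &expected, SLOT_BUSY);
}

/* Return a slot: release ordering publishes earlier writes to the payload
 * before the state becomes IDLE again; failure here would indicate a bug. */
static inline void slot_return(atomic64_t *state)
{
        s64 expected = SLOT_BUSY;

        raw_atomic64_try_cmpxchg_release(state, &expected, SLOT_IDLE);
}
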
                                                   >> 2395 #endif /* arch_atomic64_try_cmpxchg_relaxed */
                                                   >> 2396 
                                                   >> 2397 #ifndef arch_atomic64_sub_and_test
4369 /**                                              2398 /**
4370  * raw_atomic64_sub_and_test() - atomic subtr !! 2399  * arch_atomic64_sub_and_test - subtract value from variable and test result
4371  * @i: s64 value to subtract                  !! 2400  * @i: integer value to subtract
4372  * @v: pointer to atomic64_t                  !! 2401  * @v: pointer of type atomic64_t
4373  *                                            << 
4374  * Atomically updates @v to (@v - @i) with fu << 
4375  *                                            << 
4376  * Safe to use in noinstr code; prefer atomic << 
4377  *                                               2402  *
4378  * Return: @true if the resulting value of @v !! 2403  * Atomically subtracts @i from @v and returns
                                                   >> 2404  * true if the result is zero, or false for all
                                                   >> 2405  * other cases.
4379  */                                              2406  */
4380 static __always_inline bool                      2407 static __always_inline bool
4381 raw_atomic64_sub_and_test(s64 i, atomic64_t * !! 2408 arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
4382 {                                                2409 {
4383 #if defined(arch_atomic64_sub_and_test)       !! 2410         return arch_atomic64_sub_return(i, v) == 0;
4384         return arch_atomic64_sub_and_test(i,  << 
4385 #else                                         << 
4386         return raw_atomic64_sub_return(i, v)  << 
4387 #endif                                        << 
4388 }                                                2411 }
                                                   >> 2412 #define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
                                                   >> 2413 #endif
4389                                                  2414 
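A sketch of the usual use of sub_and_test: dropping several references in one go and freeing on the last one. The object type and destructor are hypothetical:

#include <linux/atomic.h>

struct obj { atomic64_t refs; /* ... payload ... */ };

void obj_free(struct obj *o);   /* hypothetical destructor */

static inline void obj_put_many(struct obj *o, s64 nr)
{
        if (raw_atomic64_sub_and_test(nr, &o->refs))
                obj_free(o);    /* the last reference just went away */
}
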
                                                   >> 2415 #ifndef arch_atomic64_dec_and_test
4390 /**                                              2416 /**
4391  * raw_atomic64_dec_and_test() - atomic decre !! 2417  * arch_atomic64_dec_and_test - decrement and test
4392  * @v: pointer to atomic64_t                  !! 2418  * @v: pointer of type atomic64_t
4393  *                                            << 
4394  * Atomically updates @v to (@v - 1) with ful << 
4395  *                                            << 
4396  * Safe to use in noinstr code; prefer atomic << 
4397  *                                               2419  *
4398  * Return: @true if the resulting value of @v !! 2420  * Atomically decrements @v by 1 and
                                                   >> 2421  * returns true if the result is 0, or false for all other
                                                   >> 2422  * cases.
4399  */                                              2423  */
4400 static __always_inline bool                      2424 static __always_inline bool
4401 raw_atomic64_dec_and_test(atomic64_t *v)      !! 2425 arch_atomic64_dec_and_test(atomic64_t *v)
4402 {                                                2426 {
4403 #if defined(arch_atomic64_dec_and_test)       !! 2427         return arch_atomic64_dec_return(v) == 0;
4404         return arch_atomic64_dec_and_test(v); << 
4405 #else                                         << 
4406         return raw_atomic64_dec_return(v) ==  << 
4407 #endif                                        << 
4408 }                                                2428 }
                                                   >> 2429 #define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
                                                   >> 2430 #endif
4409                                                  2431 
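dec_and_test is the single-reference version of the same idea; another common shape is a countdown of outstanding work items, sketched here with the kernel's completion API (the helper name is hypothetical):

#include <linux/atomic.h>
#include <linux/completion.h>

/* One worker finished; the worker that brings the count to zero signals
 * the waiter. The full ordering covers the workers' earlier stores. */
static inline void work_item_done(atomic64_t *remaining, struct completion *all_done)
{
        if (raw_atomic64_dec_and_test(remaining))
                complete(all_done);
}
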
                                                   >> 2432 #ifndef arch_atomic64_inc_and_test
4410 /**                                              2433 /**
4411  * raw_atomic64_inc_and_test() - atomic incre !! 2434  * arch_atomic64_inc_and_test - increment and test
4412  * @v: pointer to atomic64_t                  !! 2435  * @v: pointer of type atomic64_t
4413  *                                            << 
4414  * Atomically updates @v to (@v + 1) with ful << 
4415  *                                               2436  *
4416  * Safe to use in noinstr code; prefer atomic !! 2437  * Atomically increments @v by 1
4417  *                                            !! 2438  * and returns true if the result is zero, or false for all
4418  * Return: @true if the resulting value of @v !! 2439  * other cases.
4419  */                                              2440  */
4420 static __always_inline bool                      2441 static __always_inline bool
4421 raw_atomic64_inc_and_test(atomic64_t *v)      !! 2442 arch_atomic64_inc_and_test(atomic64_t *v)
4422 {                                                2443 {
4423 #if defined(arch_atomic64_inc_and_test)       !! 2444         return arch_atomic64_inc_return(v) == 0;
4424         return arch_atomic64_inc_and_test(v); << 
4425 #else                                         << 
4426         return raw_atomic64_inc_return(v) ==  << 
4427 #endif                                        << 
4428 }                                                2445 }
                                                   >> 2446 #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
                                                   >> 2447 #endif
4429                                                  2448 
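inc_and_test is mostly useful with counters biased to a negative starting value, so that the increment which brings the count to zero identifies the final event. A small sketch with hypothetical names:

#include <linux/atomic.h>

static inline void tracker_init(atomic64_t *pending, s64 expected_events)
{
        raw_atomic64_set(pending, -expected_events);
}

/* Returns true for the increment that accounts for the last expected event. */
static inline bool tracker_event(atomic64_t *pending)
{
        return raw_atomic64_inc_and_test(pending);
}
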
                                                   >> 2449 #ifndef arch_atomic64_add_negative_relaxed
                                                   >> 2450 #ifdef arch_atomic64_add_negative
                                                   >> 2451 #define arch_atomic64_add_negative_acquire arch_atomic64_add_negative
                                                   >> 2452 #define arch_atomic64_add_negative_release arch_atomic64_add_negative
                                                   >> 2453 #define arch_atomic64_add_negative_relaxed arch_atomic64_add_negative
                                                   >> 2454 #endif /* arch_atomic64_add_negative */
                                                   >> 2455 
                                                   >> 2456 #ifndef arch_atomic64_add_negative
4430 /**                                              2457 /**
4431  * raw_atomic64_add_negative() - atomic add a !! 2458  * arch_atomic64_add_negative - Add and test if negative
4432  * @i: s64 value to add                       !! 2459  * @i: integer value to add
4433  * @v: pointer to atomic64_t                  !! 2460  * @v: pointer of type atomic64_t
4434  *                                            << 
4435  * Atomically updates @v to (@v + @i) with fu << 
4436  *                                            << 
4437  * Safe to use in noinstr code; prefer atomic << 
4438  *                                               2461  *
4439  * Return: @true if the resulting value of @v !! 2462  * Atomically adds @i to @v and returns true if the result is negative,
                                                   >> 2463  * or false when the result is greater than or equal to zero.
4440  */                                              2464  */
4441 static __always_inline bool                      2465 static __always_inline bool
4442 raw_atomic64_add_negative(s64 i, atomic64_t * !! 2466 arch_atomic64_add_negative(s64 i, atomic64_t *v)
4443 {                                                2467 {
4444 #if defined(arch_atomic64_add_negative)       !! 2468         return arch_atomic64_add_return(i, v) < 0;
4445         return arch_atomic64_add_negative(i,  << 
4446 #elif defined(arch_atomic64_add_negative_rela << 
4447         bool ret;                             << 
4448         __atomic_pre_full_fence();            << 
4449         ret = arch_atomic64_add_negative_rela << 
4450         __atomic_post_full_fence();           << 
4451         return ret;                           << 
4452 #else                                         << 
4453         return raw_atomic64_add_return(i, v)  << 
4454 #endif                                        << 
4455 }                                                2469 }
                                                   >> 2470 #define arch_atomic64_add_negative arch_atomic64_add_negative
                                                   >> 2471 #endif
4456                                                  2472 
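add_negative folds the sign test into the atomic update; a typical shape is charging against a signed budget, as in this sketch (names hypothetical):

#include <linux/atomic.h>

/* Charge 'cost' against a signed balance; the caller learns atomically
 * whether this particular charge drove the balance below zero. */
static inline bool budget_charge(atomic64_t *balance, s64 cost)
{
        return raw_atomic64_add_negative(-cost, balance);
}
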
                                                   >> 2473 #ifndef arch_atomic64_add_negative_acquire
4457 /**                                              2474 /**
4458  * raw_atomic64_add_negative_acquire() - atom !! 2475  * arch_atomic64_add_negative_acquire - Add and test if negative
4459  * @i: s64 value to add                       !! 2476  * @i: integer value to add
4460  * @v: pointer to atomic64_t                  !! 2477  * @v: pointer of type atomic64_t
4461  *                                               2478  *
4462  * Atomically updates @v to (@v + @i) with ac !! 2479  * Atomically adds @i to @v and returns true if the result is negative,
4463  *                                            !! 2480  * or false when the result is greater than or equal to zero.
4464  * Safe to use in noinstr code; prefer atomic << 
4465  *                                            << 
4466  * Return: @true if the resulting value of @v << 
4467  */                                              2481  */
4468 static __always_inline bool                      2482 static __always_inline bool
4469 raw_atomic64_add_negative_acquire(s64 i, atom !! 2483 arch_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
4470 {                                                2484 {
4471 #if defined(arch_atomic64_add_negative_acquir !! 2485         return arch_atomic64_add_return_acquire(i, v) < 0;
4472         return arch_atomic64_add_negative_acq << 
4473 #elif defined(arch_atomic64_add_negative_rela << 
4474         bool ret = arch_atomic64_add_negative << 
4475         __atomic_acquire_fence();             << 
4476         return ret;                           << 
4477 #elif defined(arch_atomic64_add_negative)     << 
4478         return arch_atomic64_add_negative(i,  << 
4479 #else                                         << 
4480         return raw_atomic64_add_return_acquir << 
4481 #endif                                        << 
4482 }                                                2486 }
                                                   >> 2487 #define arch_atomic64_add_negative_acquire arch_atomic64_add_negative_acquire
                                                   >> 2488 #endif
4483                                                  2489 
                                                   >> 2490 #ifndef arch_atomic64_add_negative_release
4484 /**                                              2491 /**
4485  * raw_atomic64_add_negative_release() - atom !! 2492  * arch_atomic64_add_negative_release - Add and test if negative
4486  * @i: s64 value to add                       !! 2493  * @i: integer value to add
4487  * @v: pointer to atomic64_t                  !! 2494  * @v: pointer of type atomic64_t
4488  *                                               2495  *
4489  * Atomically updates @v to (@v + @i) with re !! 2496  * Atomically adds @i to @v and returns true if the result is negative,
4490  *                                            !! 2497  * or false when the result is greater than or equal to zero.
4491  * Safe to use in noinstr code; prefer atomic << 
4492  *                                            << 
4493  * Return: @true if the resulting value of @v << 
4494  */                                              2498  */
4495 static __always_inline bool                      2499 static __always_inline bool
4496 raw_atomic64_add_negative_release(s64 i, atom !! 2500 arch_atomic64_add_negative_release(s64 i, atomic64_t *v)
4497 {                                                2501 {
4498 #if defined(arch_atomic64_add_negative_releas !! 2502         return arch_atomic64_add_return_release(i, v) < 0;
4499         return arch_atomic64_add_negative_rel << 
4500 #elif defined(arch_atomic64_add_negative_rela << 
4501         __atomic_release_fence();             << 
4502         return arch_atomic64_add_negative_rel << 
4503 #elif defined(arch_atomic64_add_negative)     << 
4504         return arch_atomic64_add_negative(i,  << 
4505 #else                                         << 
4506         return raw_atomic64_add_return_releas << 
4507 #endif                                        << 
4508 }                                                2503 }
                                                   >> 2504 #define arch_atomic64_add_negative_release arch_atomic64_add_negative_release
                                                   >> 2505 #endif
4509                                                  2506 
                                                   >> 2507 #ifndef arch_atomic64_add_negative_relaxed
4510 /**                                              2508 /**
4511  * raw_atomic64_add_negative_relaxed() - atom !! 2509  * arch_atomic64_add_negative_relaxed - Add and test if negative
4512  * @i: s64 value to add                       !! 2510  * @i: integer value to add
4513  * @v: pointer to atomic64_t                  !! 2511  * @v: pointer of type atomic64_t
4514  *                                            << 
4515  * Atomically updates @v to (@v + @i) with re << 
4516  *                                               2512  *
4517  * Safe to use in noinstr code; prefer atomic !! 2513  * Atomically adds @i to @v and returns true if the result is negative,
4518  *                                            !! 2514  * or false when the result is greater than or equal to zero.
4519  * Return: @true if the resulting value of @v << 
4520  */                                              2515  */
4521 static __always_inline bool                      2516 static __always_inline bool
4522 raw_atomic64_add_negative_relaxed(s64 i, atom !! 2517 arch_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
                                                   >> 2518 {
                                                   >> 2519         return arch_atomic64_add_return_relaxed(i, v) < 0;
                                                   >> 2520 }
                                                   >> 2521 #define arch_atomic64_add_negative_relaxed arch_atomic64_add_negative_relaxed
                                                   >> 2522 #endif
                                                   >> 2523 
                                                   >> 2524 #else /* arch_atomic64_add_negative_relaxed */
                                                   >> 2525 
                                                   >> 2526 #ifndef arch_atomic64_add_negative_acquire
                                                   >> 2527 static __always_inline bool
                                                   >> 2528 arch_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
                                                   >> 2529 {
                                                   >> 2530         bool ret = arch_atomic64_add_negative_relaxed(i, v);
                                                   >> 2531         __atomic_acquire_fence();
                                                   >> 2532         return ret;
                                                   >> 2533 }
                                                   >> 2534 #define arch_atomic64_add_negative_acquire arch_atomic64_add_negative_acquire
                                                   >> 2535 #endif
                                                   >> 2536 
                                                   >> 2537 #ifndef arch_atomic64_add_negative_release
                                                   >> 2538 static __always_inline bool
                                                   >> 2539 arch_atomic64_add_negative_release(s64 i, atomic64_t *v)
4523 {                                                2540 {
4524 #if defined(arch_atomic64_add_negative_relaxe !! 2541         __atomic_release_fence();
4525         return arch_atomic64_add_negative_rel    2542         return arch_atomic64_add_negative_relaxed(i, v);
4526 #elif defined(arch_atomic64_add_negative)     !! 2543 }
4527         return arch_atomic64_add_negative(i,  !! 2544 #define arch_atomic64_add_negative_release arch_atomic64_add_negative_release
4528 #else                                         << 
4529         return raw_atomic64_add_return_relaxe << 
4530 #endif                                           2545 #endif
                                                   >> 2546 
                                                   >> 2547 #ifndef arch_atomic64_add_negative
                                                   >> 2548 static __always_inline bool
                                                   >> 2549 arch_atomic64_add_negative(s64 i, atomic64_t *v)
                                                   >> 2550 {
                                                   >> 2551         bool ret;
                                                   >> 2552         __atomic_pre_full_fence();
                                                   >> 2553         ret = arch_atomic64_add_negative_relaxed(i, v);
                                                   >> 2554         __atomic_post_full_fence();
                                                   >> 2555         return ret;
4531 }                                                2556 }
                                                   >> 2557 #define arch_atomic64_add_negative arch_atomic64_add_negative
                                                   >> 2558 #endif
                                                   >> 2559 
                                                   >> 2560 #endif /* arch_atomic64_add_negative_relaxed */
4532                                                  2561 
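As with try_cmpxchg, the _acquire/_release/_relaxed forms of add_negative change only the ordering. For a pure statistics counter, where the result does not guard other memory accesses, the relaxed variant is enough; a sketch with a hypothetical name:

#include <linux/atomic.h>

/* Track a signed running total; no barrier is needed because nothing is
 * ordered against the outcome of the test. */
static inline bool stats_add_went_negative(atomic64_t *net, s64 delta)
{
        return raw_atomic64_add_negative_relaxed(delta, net);
}
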
                                                   >> 2562 #ifndef arch_atomic64_fetch_add_unless
4533 /**                                              2563 /**
4534  * raw_atomic64_fetch_add_unless() - atomic a !! 2564  * arch_atomic64_fetch_add_unless - add unless the number is already a given value
4535  * @v: pointer to atomic64_t                  !! 2565  * @v: pointer of type atomic64_t
4536  * @a: s64 value to add                       !! 2566  * @a: the amount to add to v...
4537  * @u: s64 value to compare with              !! 2567  * @u: ...unless v is equal to u.
4538  *                                               2568  *
4539  * If (@v != @u), atomically updates @v to (@ !! 2569  * Atomically adds @a to @v, so long as @v was not already @u.
4540  * Otherwise, @v is not modified and relaxed  !! 2570  * Returns original value of @v
4541  *                                            << 
4542  * Safe to use in noinstr code; prefer atomic << 
4543  *                                            << 
4544  * Return: The original value of @v.          << 
4545  */                                              2571  */
4546 static __always_inline s64                       2572 static __always_inline s64
4547 raw_atomic64_fetch_add_unless(atomic64_t *v,  !! 2573 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
4548 {                                                2574 {
4549 #if defined(arch_atomic64_fetch_add_unless)   !! 2575         s64 c = arch_atomic64_read(v);
4550         return arch_atomic64_fetch_add_unless << 
4551 #else                                         << 
4552         s64 c = raw_atomic64_read(v);         << 
4553                                                  2576 
4554         do {                                     2577         do {
4555                 if (unlikely(c == u))            2578                 if (unlikely(c == u))
4556                         break;                   2579                         break;
4557         } while (!raw_atomic64_try_cmpxchg(v, !! 2580         } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
4558                                                  2581 
4559         return c;                                2582         return c;
4560 #endif                                        << 
4561 }                                                2583 }
                                                   >> 2584 #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
                                                   >> 2585 #endif
4562                                                  2586 
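fetch_add_unless is the building block for the conditional-increment helpers below; the sentinel @u blocks further updates. A sketch using a hypothetical "dead pool" sentinel:

#include <linux/atomic.h>

#define POOL_DEAD       (-1LL)  /* hypothetical sentinel: no more accounting */

/* Returns what the pool held before the call; the caller can compare the
 * result against POOL_DEAD to see whether the pages were accounted. */
static inline s64 pool_account(atomic64_t *pool, s64 pages)
{
        return raw_atomic64_fetch_add_unless(pool, pages, POOL_DEAD);
}
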
                                                   >> 2587 #ifndef arch_atomic64_add_unless
4563 /**                                              2588 /**
4564  * raw_atomic64_add_unless() - atomic add unl !! 2589  * arch_atomic64_add_unless - add unless the number is already a given value
4565  * @v: pointer to atomic64_t                  !! 2590  * @v: pointer of type atomic64_t
4566  * @a: s64 value to add                       !! 2591  * @a: the amount to add to v...
4567  * @u: s64 value to compare with              !! 2592  * @u: ...unless v is equal to u.
4568  *                                            << 
4569  * If (@v != @u), atomically updates @v to (@ << 
4570  * Otherwise, @v is not modified and relaxed  << 
4571  *                                            << 
4572  * Safe to use in noinstr code; prefer atomic << 
4573  *                                               2593  *
4574  * Return: @true if @v was updated, @false ot !! 2594  * Atomically adds @a to @v, if @v was not already @u.
                                                   >> 2595  * Returns true if the addition was done.
4575  */                                              2596  */
4576 static __always_inline bool                      2597 static __always_inline bool
4577 raw_atomic64_add_unless(atomic64_t *v, s64 a, !! 2598 arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
4578 {                                                2599 {
4579 #if defined(arch_atomic64_add_unless)         !! 2600         return arch_atomic64_fetch_add_unless(v, a, u) != u;
4580         return arch_atomic64_add_unless(v, a, << 
4581 #else                                         << 
4582         return raw_atomic64_fetch_add_unless( << 
4583 #endif                                        << 
4584 }                                                2601 }
                                                   >> 2602 #define arch_atomic64_add_unless arch_atomic64_add_unless
                                                   >> 2603 #endif
4585                                                  2604 
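add_unless reports only the success or failure of the same operation, which is usually all a caller wants. A minimal sketch, using S64_MAX as an illustrative "frozen" sentinel:

#include <linux/atomic.h>
#include <linux/limits.h>

/* Bump a usage count unless it has been pinned at the sentinel value. */
static inline bool usage_try_bump(atomic64_t *usage)
{
        return raw_atomic64_add_unless(usage, 1, S64_MAX);
}
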
                                                   >> 2605 #ifndef arch_atomic64_inc_not_zero
4586 /**                                              2606 /**
4587  * raw_atomic64_inc_not_zero() - atomic incre !! 2607  * arch_atomic64_inc_not_zero - increment unless the number is zero
4588  * @v: pointer to atomic64_t                  !! 2608  * @v: pointer of type atomic64_t
4589  *                                            << 
4590  * If (@v != 0), atomically updates @v to (@v << 
4591  * Otherwise, @v is not modified and relaxed  << 
4592  *                                            << 
4593  * Safe to use in noinstr code; prefer atomic << 
4594  *                                               2609  *
4595  * Return: @true if @v was updated, @false ot !! 2610  * Atomically increments @v by 1, if @v is non-zero.
                                                   >> 2611  * Returns true if the increment was done.
4596  */                                              2612  */
4597 static __always_inline bool                      2613 static __always_inline bool
4598 raw_atomic64_inc_not_zero(atomic64_t *v)      !! 2614 arch_atomic64_inc_not_zero(atomic64_t *v)
4599 {                                                2615 {
4600 #if defined(arch_atomic64_inc_not_zero)       !! 2616         return arch_atomic64_add_unless(v, 1, 0);
4601         return arch_atomic64_inc_not_zero(v); << 
4602 #else                                         << 
4603         return raw_atomic64_add_unless(v, 1,  << 
4604 #endif                                        << 
4605 }                                                2617 }
                                                   >> 2618 #define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
                                                   >> 2619 #endif
4606                                                  2620 
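inc_not_zero is the classic lookup-side guard for refcounted objects: a reference may only be taken while at least one other reference still exists. A sketch with a hypothetical object type:

#include <linux/atomic.h>

struct lookup_obj { atomic64_t refs; /* ... payload ... */ };

/* Called under RCU or another existence guarantee; returns false if the
 * object already hit zero and is on its way to being freed. */
static inline bool lookup_obj_tryget(struct lookup_obj *o)
{
        return raw_atomic64_inc_not_zero(&o->refs);
}
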
4607 /**                                           !! 2621 #ifndef arch_atomic64_inc_unless_negative
4608  * raw_atomic64_inc_unless_negative() - atomi << 
4609  * @v: pointer to atomic64_t                  << 
4610  *                                            << 
4611  * If (@v >= 0), atomically updates @v to (@v << 
4612  * Otherwise, @v is not modified and relaxed  << 
4613  *                                            << 
4614  * Safe to use in noinstr code; prefer atomic << 
4615  *                                            << 
4616  * Return: @true if @v was updated, @false ot << 
4617  */                                           << 
4618 static __always_inline bool                      2622 static __always_inline bool
4619 raw_atomic64_inc_unless_negative(atomic64_t * !! 2623 arch_atomic64_inc_unless_negative(atomic64_t *v)
4620 {                                                2624 {
4621 #if defined(arch_atomic64_inc_unless_negative !! 2625         s64 c = arch_atomic64_read(v);
4622         return arch_atomic64_inc_unless_negat << 
4623 #else                                         << 
4624         s64 c = raw_atomic64_read(v);         << 
4625                                                  2626 
4626         do {                                     2627         do {
4627                 if (unlikely(c < 0))             2628                 if (unlikely(c < 0))
4628                         return false;            2629                         return false;
4629         } while (!raw_atomic64_try_cmpxchg(v, !! 2630         } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1));
4630                                                  2631 
4631         return true;                             2632         return true;
4632 #endif                                        << 
4633 }                                                2633 }
                                                   >> 2634 #define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
                                                   >> 2635 #endif
4634                                                  2636 
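inc_unless_negative suits counters where a negative value acts as a "closed" flag, e.g. readers counting themselves in only while no teardown is in progress. A sketch (names hypothetical):

#include <linux/atomic.h>

/* Readers increment; teardown code elsewhere sets the counter negative
 * to turn new readers away. */
static inline bool reader_enter(atomic64_t *readers)
{
        return raw_atomic64_inc_unless_negative(readers);
}
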
4635 /**                                           !! 2637 #ifndef arch_atomic64_dec_unless_positive
4636  * raw_atomic64_dec_unless_positive() - atomi << 
4637  * @v: pointer to atomic64_t                  << 
4638  *                                            << 
4639  * If (@v <= 0), atomically updates @v to (@v << 
4640  * Otherwise, @v is not modified and relaxed  << 
4641  *                                            << 
4642  * Safe to use in noinstr code; prefer atomic << 
4643  *                                            << 
4644  * Return: @true if @v was updated, @false ot << 
4645  */                                           << 
4646 static __always_inline bool                      2638 static __always_inline bool
4647 raw_atomic64_dec_unless_positive(atomic64_t * !! 2639 arch_atomic64_dec_unless_positive(atomic64_t *v)
4648 {                                                2640 {
4649 #if defined(arch_atomic64_dec_unless_positive !! 2641         s64 c = arch_atomic64_read(v);
4650         return arch_atomic64_dec_unless_posit << 
4651 #else                                         << 
4652         s64 c = raw_atomic64_read(v);         << 
4653                                                  2642 
4654         do {                                     2643         do {
4655                 if (unlikely(c > 0))             2644                 if (unlikely(c > 0))
4656                         return false;            2645                         return false;
4657         } while (!raw_atomic64_try_cmpxchg(v, !! 2646         } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1));
4658                                                  2647 
4659         return true;                             2648         return true;
4660 #endif                                        << 
4661 }                                                2649 }
                                                   >> 2650 #define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
                                                   >> 2651 #endif
4662                                                  2652 
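dec_unless_positive is the mirror image, moving the counter further negative only while no positive ("reader") values are present; sketched here as the other side of the same hypothetical scheme:

#include <linux/atomic.h>

/* Writers push the counter negative, but only while no readers hold it
 * at a positive value. */
static inline bool writer_enter(atomic64_t *state)
{
        return raw_atomic64_dec_unless_positive(state);
}
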
4663 /**                                           !! 2653 #ifndef arch_atomic64_dec_if_positive
4664  * raw_atomic64_dec_if_positive() - atomic de << 
4665  * @v: pointer to atomic64_t                  << 
4666  *                                            << 
4667  * If (@v > 0), atomically updates @v to (@v  << 
4668  * Otherwise, @v is not modified and relaxed  << 
4669  *                                            << 
4670  * Safe to use in noinstr code; prefer atomic << 
4671  *                                            << 
4672  * Return: The old value of (@v - 1), regardl << 
4673  */                                           << 
4674 static __always_inline s64                       2654 static __always_inline s64
4675 raw_atomic64_dec_if_positive(atomic64_t *v)   !! 2655 arch_atomic64_dec_if_positive(atomic64_t *v)
4676 {                                                2656 {
4677 #if defined(arch_atomic64_dec_if_positive)    !! 2657         s64 dec, c = arch_atomic64_read(v);
4678         return arch_atomic64_dec_if_positive( << 
4679 #else                                         << 
4680         s64 dec, c = raw_atomic64_read(v);    << 
4681                                                  2658 
4682         do {                                     2659         do {
4683                 dec = c - 1;                     2660                 dec = c - 1;
4684                 if (unlikely(dec < 0))           2661                 if (unlikely(dec < 0))
4685                         break;                   2662                         break;
4686         } while (!raw_atomic64_try_cmpxchg(v, !! 2663         } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
4687                                                  2664 
4688         return dec;                              2665         return dec;
4689 #endif                                        << 
4690 }                                                2666 }
                                                   >> 2667 #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
                                                   >> 2668 #endif
4691                                                  2669 
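dec_if_positive behaves like a try-acquire on a counting resource: the return value is the count after a successful decrement, and a negative return means nothing was taken. A sketch with a hypothetical name:

#include <linux/atomic.h>

/* Take one unit if any are available; 'available' never goes below zero. */
static inline bool resource_tryget(atomic64_t *available)
{
        return raw_atomic64_dec_if_positive(available) >= 0;
}
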
4692 #endif /* _LINUX_ATOMIC_FALLBACK_H */            2670 #endif /* _LINUX_ATOMIC_FALLBACK_H */
4693 // b565db590afeeff0d7c9485ccbca5bb6e155749f   !! 2671 // ad2e2b4d168dbc60a73922616047a9bfa446af36
4694                                                  2672 
