
TOMOYO Linux Cross Reference
Linux/include/asm-generic/atomic.h


Diff markup

Differences between /include/asm-generic/atomic.h (Version linux-6.12-rc7) and /include/asm-mips/atomic.h (Version linux-2.6.0)


  1 /* SPDX-License-Identifier: GPL-2.0-or-later */                              <<
  2 /*                                                  1 /*
  3  * Generic C implementation of atomic counter operations. Do not include in  !!   2  * Atomic operations that C can't guarantee us.  Useful for
  4  * machine independent code.                                                  !!   3  * resource counting etc..
  5  *                                                  4  *
  6  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.                      !!   5  * But use these as seldom as possible since they are much more slower
  7  * Written by David Howells (dhowells@redhat.com)                             !!   6  * than regular operations.
                                                   >>   7  *
                                                   >>   8  * This file is subject to the terms and conditions of the GNU General Public
                                                   >>   9  * License.  See the file "COPYING" in the main directory of this archive
                                                   >>  10  * for more details.
                                                   >>  11  *
                                                   >>  12  * Copyright (C) 1996, 97, 99, 2000, 03 by Ralf Baechle
                                                   >>  13  */
                                                   >>  14 #ifndef _ASM_ATOMIC_H
                                                   >>  15 #define _ASM_ATOMIC_H
                                                   >>  16 
                                                   >>  17 #include <linux/config.h>
                                                   >>  18 
                                                   >>  19 #include <asm/system.h>
                                                   >>  20 
                                                   >>  21 typedef struct { volatile int counter; } atomic_t;
                                                   >>  22 typedef struct { volatile __s64 counter; } atomic64_t;
                                                   >>  23 
                                                   >>  24 #ifdef __KERNEL__
                                                   >>  25 #define ATOMIC_INIT(i)    { (i) }
                                                   >>  26 #define ATOMIC64_INIT(i)    { (i) }
                                                   >>  27 
                                                   >>  28 /*
                                                   >>  29  * atomic_read - read atomic variable
                                                   >>  30  * @v: pointer of type atomic_t
                                                   >>  31  *
                                                   >>  32  * Atomically reads the value of @v.  Note that the guaranteed
                                                   >>  33  * useful range of an atomic_t is only 24 bits.
                                                   >>  34  */
                                                   >>  35 #define atomic_read(v)          ((v)->counter)
                                                   >>  36 
                                                   >>  37 /*
                                                   >>  38  * atomic64_read - read atomic variable
                                                   >>  39  * @v: pointer of type atomic64_t
                                                   >>  40  *
                                                   >>  41  */
                                                   >>  42 #define atomic64_read(v)        ((v)->counter)
                                                   >>  43 
                                                   >>  44 /*
                                                   >>  45  * atomic_set - set atomic variable
                                                   >>  46  * @v: pointer of type atomic_t
                                                   >>  47  * @i: required value
                                                   >>  48  *
                                                   >>  49  * Atomically sets the value of @v to @i.  Note that the guaranteed
                                                   >>  50  * useful range of an atomic_t is only 24 bits.
                                                   >>  51  */
                                                   >>  52 #define atomic_set(v,i)         ((v)->counter = (i))
                                                   >>  53 
                                                   >>  54 /*
                                                   >>  55  * atomic64_set - set atomic variable
                                                   >>  56  * @v: pointer of type atomic64_t
                                                   >>  57  * @i: required value
                                                   >>  58  */
                                                   >>  59 #define atomic64_set(v,i)       ((v)->counter = (i))
                                                   >>  60 
                                                   >>  61 #ifndef CONFIG_CPU_HAS_LLSC
                                                   >>  62 
                                                   >>  63 /*
                                                   >>  64  * The MIPS I implementation is only atomic with respect to
                                                   >>  65  * interrupts.  R3000 based multiprocessor machines are rare anyway ...
                                                   >>  66  *
                                                   >>  67  * atomic_add - add integer to atomic variable
                                                   >>  68  * @i: integer value to add
                                                   >>  69  * @v: pointer of type atomic_t
                                                   >>  70  *
                                                   >>  71  * Atomically adds @i to @v.  Note that the guaranteed useful range
                                                   >>  72  * of an atomic_t is only 24 bits.
                                                   >>  73  */
                                                   >>  74 static __inline__ void atomic_add(int i, atomic_t * v)
                                                   >>  75 {
                                                   >>  76         unsigned long flags;
                                                   >>  77 
                                                   >>  78         local_irq_save(flags);
                                                   >>  79         v->counter += i;
                                                   >>  80         local_irq_restore(flags);
                                                   >>  81 }
                                                   >>  82 
                                                   >>  83 /*
                                                   >>  84  * atomic_sub - subtract the atomic variable
                                                   >>  85  * @i: integer value to subtract
                                                   >>  86  * @v: pointer of type atomic_t
                                                   >>  87  *
                                                   >>  88  * Atomically subtracts @i from @v.  Note that the guaranteed
                                                   >>  89  * useful range of an atomic_t is only 24 bits.
                                                   >>  90  */
                                                   >>  91 static __inline__ void atomic_sub(int i, atomic_t * v)
                                                   >>  92 {
                                                   >>  93         unsigned long flags;
                                                   >>  94 
                                                   >>  95         local_irq_save(flags);
                                                   >>  96         v->counter -= i;
                                                   >>  97         local_irq_restore(flags);
                                                   >>  98 }
                                                   >>  99 
                                                   >> 100 static __inline__ int atomic_add_return(int i, atomic_t * v)
                                                   >> 101 {
                                                   >> 102         unsigned long flags;
                                                   >> 103         int temp;
                                                   >> 104 
                                                   >> 105         local_irq_save(flags);
                                                   >> 106         temp = v->counter;
                                                   >> 107         temp += i;
                                                   >> 108         v->counter = temp;
                                                   >> 109         local_irq_restore(flags);
                                                   >> 110 
                                                   >> 111         return temp;
                                                   >> 112 }
                                                   >> 113 
                                                   >> 114 static __inline__ int atomic_sub_return(int i, atomic_t * v)
                                                   >> 115 {
                                                   >> 116         unsigned long flags;
                                                   >> 117         int temp;
                                                   >> 118 
                                                   >> 119         local_irq_save(flags);
                                                   >> 120         temp = v->counter;
                                                   >> 121         temp -= i;
                                                   >> 122         v->counter = temp;
                                                   >> 123         local_irq_restore(flags);
                                                   >> 124 
                                                   >> 125         return temp;
                                                   >> 126 }
                                                   >> 127 
                                                   >> 128 #else
                                                   >> 129 
                                                   >> 130 /*
                                                   >> 131  * ... while for MIPS II and better we can use ll/sc instruction.  This
                                                   >> 132  * implementation is SMP safe ...
  8  */                                               133  */
  9 #ifndef __ASM_GENERIC_ATOMIC_H                 !! 134 
 10 #define __ASM_GENERIC_ATOMIC_H                 !! 135 /*
 11                                                !! 136  * atomic_add - add integer to atomic variable
 12 #include <asm/cmpxchg.h>                       !! 137  * @i: integer value to add
 13 #include <asm/barrier.h>                       !! 138  * @v: pointer of type atomic_t
 14                                                !! 139  *
 15 #ifdef CONFIG_SMP                              !! 140  * Atomically adds @i to @v.  Note that the guaranteed useful range
 16                                                !! 141  * of an atomic_t is only 24 bits.
 17 /* we can build all atomic primitives from cmpxchg */                        !! 142  */
 18                                                !! 143 static __inline__ void atomic_add(int i, atomic_t * v)
 19 #define ATOMIC_OP(op, c_op)                                              \   !! 144 {
 20 static inline void generic_atomic_##op(int i, atomic_t *v)              \   !! 145         unsigned long temp;
 21 {                                                                        \   !! 146 
 22         int c, old;                                                      \   !! 147         __asm__ __volatile__(
 23                                                                          \   !! 148         "1:     ll      %0, %1          # atomic_add            \n"
 24         c = v->counter;                                                  \   !! 149         "       addu    %0, %2                                  \n"
 25         while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c)      \   !! 150         "       sc      %0, %1                                  \n"
 26                 c = old;                                                 \   !! 151         "       beqz    %0, 1b                                  \n"
 27 }                                              !! 152         : "=&r" (temp), "=m" (v->counter)
 28                                                !! 153         : "Ir" (i), "m" (v->counter));
 29 #define ATOMIC_OP_RETURN(op, c_op)                                       \   !! 154 }
 30 static inline int generic_atomic_##op##_return(int i, atomic_t *v)      \   !! 155 
 31 {                                                                        \   !! 156 /*
 32         int c, old;                                                      \   !! 157  * atomic_sub - subtract the atomic variable
 33                                                                          \   !! 158  * @i: integer value to subtract
 34         c = v->counter;                                                  \   !! 159  * @v: pointer of type atomic_t
 35         while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c)      \   !! 160  *
 36                 c = old;                                                 \   !! 161  * Atomically subtracts @i from @v.  Note that the guaranteed
 37                                                                          \   !! 162  * useful range of an atomic_t is only 24 bits.
 38         return c c_op i;                                                 \   !! 163  */
 39 }                                              !! 164 static __inline__ void atomic_sub(int i, atomic_t * v)
 40                                                !! 165 {
 41 #define ATOMIC_FETCH_OP(op, c_op)                                        \   !! 166         unsigned long temp;
 42 static inline int generic_atomic_fetch_##op(int i, atomic_t *v)         \   !! 167 
 43 {                                                                        \   !! 168         __asm__ __volatile__(
 44         int c, old;                                                      \   !! 169         "       .set    noreorder       # atomic_sub            \n"
 45                                                                          \   !! 170         "1:     ll      %0, %1                                  \n"
 46         c = v->counter;                                                  \   !! 171         "       subu    %0, %2                                  \n"
 47         while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c)      \   !! 172         "       sc      %0, %1                                  \n"
 48                 c = old;                                                 \   !! 173         "       beqz    %0, 1b                                  \n"
 49                                                                          \   !! 174         "       .set    reorder                                 \n"
 50         return c;                                                        \   !! 175         : "=&r" (temp), "=m" (v->counter)
                                                   >> 176         : "Ir" (i), "m" (v->counter));
                                                   >> 177 }
                                                   >> 178 
                                                   >> 179 /*
                                                   >> 180  * Same as above, but return the result value
                                                   >> 181  */
                                                   >> 182 static __inline__ int atomic_add_return(int i, atomic_t * v)
                                                   >> 183 {
                                                   >> 184         unsigned long temp, result;
                                                   >> 185 
                                                   >> 186         __asm__ __volatile__(
                                                   >> 187         "       .set    noreorder       # atomic_add_return     \n"
                                                   >> 188         "1:     ll      %1, %2                                  \n"
                                                   >> 189         "       addu    %0, %1, %3                              \n"
                                                   >> 190         "       sc      %0, %2                                  \n"
                                                   >> 191         "       beqz    %0, 1b                                  \n"
                                                   >> 192         "       addu    %0, %1, %3                              \n"
                                                   >> 193         "       sync                                            \n"
                                                   >> 194         "       .set    reorder                                 \n"
                                                   >> 195         : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                                                   >> 196         : "Ir" (i), "m" (v->counter)
                                                   >> 197         : "memory");
                                                   >> 198 
                                                   >> 199         return result;
                                                   >> 200 }
                                                   >> 201 
                                                   >> 202 static __inline__ int atomic_sub_return(int i, atomic_t * v)
                                                   >> 203 {
                                                   >> 204         unsigned long temp, result;
                                                   >> 205 
                                                   >> 206         __asm__ __volatile__(
                                                   >> 207         "       .set    noreorder       # atomic_sub_return     \n"
                                                   >> 208         "1:     ll      %1, %2                                  \n"
                                                   >> 209         "       subu    %0, %1, %3                              \n"
                                                   >> 210         "       sc      %0, %2                                  \n"
                                                   >> 211         "       beqz    %0, 1b                                  \n"
                                                   >> 212         "       subu    %0, %1, %3                              \n"
                                                   >> 213         "       sync                                            \n"
                                                   >> 214         "       .set    reorder                                 \n"
                                                   >> 215         : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                                                   >> 216         : "Ir" (i), "m" (v->counter)
                                                   >> 217         : "memory");
                                                   >> 218 
                                                   >> 219         return result;
                                                   >> 220 }
                                                   >> 221 #endif
                                                   >> 222 
                                                   >> 223 #ifndef CONFIG_CPU_HAS_LLDSCD
                                                   >> 224 
                                                   >> 225 /*
                                                   >> 226  * This implementation is only atomic with respect to interrupts.  It can't
                                                   >> 227  * be used on SMP
                                                   >> 228  *
                                                   >> 229  * atomic64_add - add integer to atomic variable
                                                   >> 230  * @i: integer value to add
                                                   >> 231  * @v: pointer of type atomic64_t
                                                   >> 232  *
                                                   >> 233  * Atomically adds @i to @v.
                                                   >> 234  */
                                                   >> 235 static __inline__ void atomic64_add(int i, atomic64_t * v)
                                                   >> 236 {
                                                   >> 237         unsigned long flags;
                                                   >> 238 
                                                   >> 239         local_irq_save(flags);
                                                   >> 240         v->counter += i;
                                                   >> 241         local_irq_restore(flags);
                                                   >> 242 }
                                                   >> 243 
                                                   >> 244 /*
                                                   >> 245  * atomic64_sub - subtract the atomic variable
                                                   >> 246  * @i: integer value to subtract
                                                   >> 247  * @v: pointer of type atomic64_t
                                                   >> 248  *
                                                   >> 249  * Atomically subtracts @i from @v.
                                                   >> 250  */
                                                   >> 251 static __inline__ void atomic64_sub(int i, atomic64_t * v)
                                                   >> 252 {
                                                   >> 253         unsigned long flags;
                                                   >> 254 
                                                   >> 255         local_irq_save(flags);
                                                   >> 256         v->counter -= i;
                                                   >> 257         local_irq_restore(flags);
                                                   >> 258 }
                                                   >> 259 
                                                   >> 260 static __inline__ int atomic64_add_return(int i, atomic64_t * v)
                                                   >> 261 {
                                                   >> 262         unsigned long flags;
                                                   >> 263         int temp;
                                                   >> 264 
                                                   >> 265         local_irq_save(flags);
                                                   >> 266         temp = v->counter;
                                                   >> 267         temp += i;
                                                   >> 268         v->counter = temp;
                                                   >> 269         local_irq_restore(flags);
                                                   >> 270 
                                                   >> 271         return temp;
                                                   >> 272 }
                                                   >> 273 
                                                   >> 274 static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
                                                   >> 275 {
                                                   >> 276         unsigned long flags;
                                                   >> 277         int temp;
                                                   >> 278 
                                                   >> 279         local_irq_save(flags);
                                                   >> 280         temp = v->counter;
                                                   >> 281         temp -= i;
                                                   >> 282         v->counter = temp;
                                                   >> 283         local_irq_restore(flags);
                                                   >> 284 
                                                   >> 285         return temp;
 51 }                                                 286 }
 52                                                   287 
 53 #else                                             288 #else
 54                                                   289 
 55 #include <linux/irqflags.h>                    !! 290 /*
                                                   >> 291  * ... while for MIPS III and better we can use ll/sc instruction.  This
                                                   >> 292  * implementation is SMP safe ...
                                                   >> 293  */
                                                   >> 294 
                                                   >> 295 /*
                                                   >> 296  * atomic64_add - add integer to atomic variable
                                                   >> 297  * @i: integer value to add
                                                   >> 298  * @v: pointer of type atomic64_t
                                                   >> 299  *
                                                   >> 300  * Atomically adds @i to @v.
                                                   >> 301  */
                                                   >> 302 static __inline__ void atomic64_add(int i, atomic64_t * v)
                                                   >> 303 {
                                                   >> 304         unsigned long temp;
                                                   >> 305 
                                                   >> 306         __asm__ __volatile__(
                                                   >> 307         "1:     ll      %0, %1          # atomic64_add          \n"
                                                   >> 308         "       addu    %0, %2                                  \n"
                                                   >> 309         "       sc      %0, %1                                  \n"
                                                   >> 310         "       beqz    %0, 1b                                  \n"
                                                   >> 311         : "=&r" (temp), "=m" (v->counter)
                                                   >> 312         : "Ir" (i), "m" (v->counter));
                                                   >> 313 }
                                                   >> 314 
                                                   >> 315 /*
                                                   >> 316  * atomic64_sub - subtract the atomic variable
                                                   >> 317  * @i: integer value to subtract
                                                   >> 318  * @v: pointer of type atomic64_t
                                                   >> 319  *
                                                   >> 320  * Atomically subtracts @i from @v.
                                                   >> 321  */
                                                   >> 322 static __inline__ void atomic64_sub(int i, atomic64_t * v)
                                                   >> 323 {
                                                   >> 324         unsigned long temp;
                                                   >> 325 
                                                   >> 326         __asm__ __volatile__(
                                                   >> 327         "       .set    noreorder       # atomic64_sub          \n"
                                                   >> 328         "1:     ll      %0, %1                                  \n"
                                                   >> 329         "       subu    %0, %2                                  \n"
                                                   >> 330         "       sc      %0, %1                                  \n"
                                                   >> 331         "       beqz    %0, 1b                                  \n"
                                                   >> 332         "       .set    reorder                                 \n"
                                                   >> 333         : "=&r" (temp), "=m" (v->counter)
                                                   >> 334         : "Ir" (i), "m" (v->counter));
                                                   >> 335 }
                                                   >> 336 
                                                   >> 337 /*
                                                   >> 338  * Same as above, but return the result value
                                                   >> 339  */
                                                   >> 340 static __inline__ int atomic64_add_return(int i, atomic64_t * v)
                                                   >> 341 {
                                                   >> 342         unsigned long temp, result;
                                                   >> 343 
                                                   >> 344         __asm__ __volatile__(
                                                   >> 345         "       .set    noreorder       # atomic64_add_return   \n"
                                                   >> 346         "1:     ll      %1, %2                                  \n"
                                                   >> 347         "       addu    %0, %1, %3                              \n"
                                                   >> 348         "       sc      %0, %2                                  \n"
                                                   >> 349         "       beqz    %0, 1b                                  \n"
                                                   >> 350         "       addu    %0, %1, %3                              \n"
                                                   >> 351         "       sync                                            \n"
                                                   >> 352         "       .set    reorder                                 \n"
                                                   >> 353         : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                                                   >> 354         : "Ir" (i), "m" (v->counter)
                                                   >> 355         : "memory");
                                                   >> 356 
                                                   >> 357         return result;
                                                   >> 358 }
                                                   >> 359 
                                                   >> 360 static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
                                                   >> 361 {
                                                   >> 362         unsigned long temp, result;
                                                   >> 363 
                                                   >> 364         __asm__ __volatile__(
                                                   >> 365         "       .set    noreorder       # atomic64_sub_return   \n"
                                                   >> 366         "1:     ll      %1, %2                                  \n"
                                                   >> 367         "       subu    %0, %1, %3                              \n"
                                                   >> 368         "       sc      %0, %2                                  \n"
                                                   >> 369         "       beqz    %0, 1b                                  \n"
                                                   >> 370         "       subu    %0, %1, %3                              \n"
                                                   >> 371         "       sync                                            \n"
                                                   >> 372         "       .set    reorder                                 \n"
                                                   >> 373         : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                                                   >> 374         : "Ir" (i), "m" (v->counter)
                                                   >> 375         : "memory");
                                                   >> 376 
                                                   >> 377         return result;
                                                   >> 378 }
                                                   >> 379 #endif
                                                   >> 380 
                                                   >> 381 #define atomic_dec_return(v) atomic_sub_return(1,(v))
                                                   >> 382 #define atomic64_dec_return(v) atomic64_sub_return(1,(v))
                                                   >> 383 #define atomic_inc_return(v) atomic_add_return(1,(v))
                                                   >> 384 #define atomic64_inc_return(v) atomic64_add_return(1,(v))
                                                   >> 385 
                                                   >> 386 /*
                                                   >> 387  * atomic_sub_and_test - subtract value from variable and test result
                                                   >> 388  * @i: integer value to subtract
                                                   >> 389  * @v: pointer of type atomic_t
                                                   >> 390  *
                                                   >> 391  * Atomically subtracts @i from @v and returns
                                                   >> 392  * true if the result is zero, or false for all
                                                   >> 393  * other cases.  Note that the guaranteed
                                                   >> 394  * useful range of an atomic_t is only 24 bits.
                                                   >> 395  */
                                                   >> 396 #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
                                                   >> 397 
                                                   >> 398 /*
                                                   >> 399  * atomic64_sub_and_test - subtract value from variable and test result
                                                   >> 400  * @i: integer value to subtract
                                                   >> 401  * @v: pointer of type atomic64_t
                                                   >> 402  *
                                                   >> 403  * Atomically subtracts @i from @v and returns
                                                   >> 404  * true if the result is zero, or false for all
                                                   >> 405  * other cases.
                                                   >> 406  */
                                                   >> 407 #define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
                                                   >> 408 
                                                   >> 409 /*
                                                   >> 410  * atomic_inc_and_test - increment and test
                                                   >> 411  * @v: pointer of type atomic_t
                                                   >> 412  *
                                                   >> 413  * Atomically increments @v by 1
                                                   >> 414  * and returns true if the result is zero, or false for all
                                                   >> 415  * other cases.  Note that the guaranteed
                                                   >> 416  * useful range of an atomic_t is only 24 bits.
                                                   >> 417  */
                                                   >> 418 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
                                                   >> 419 
                                                   >> 420 /*
                                                   >> 421  * atomic64_inc_and_test - increment and test
                                                   >> 422  * @v: pointer of type atomic64_t
                                                   >> 423  *
                                                   >> 424  * Atomically increments @v by 1
                                                   >> 425  * and returns true if the result is zero, or false for all
                                                   >> 426  * other cases.
                                                   >> 427  */
                                                   >> 428 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
                                                   >> 429 
                                                   >> 430 /*
                                                   >> 431  * atomic_dec_and_test - decrement by 1 and test
                                                   >> 432  * @v: pointer of type atomic_t
                                                   >> 433  *
                                                   >> 434  * Atomically decrements @v by 1 and
                                                   >> 435  * returns true if the result is 0, or false for all other
                                                   >> 436  * cases.  Note that the guaranteed
                                                   >> 437  * useful range of an atomic_t is only 24 bits.
                                                   >> 438  */
                                                   >> 439 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
                                                   >> 440 
                                                   >> 441 /*
                                                   >> 442  * atomic64_dec_and_test - decrement by 1 and test
                                                   >> 443  * @v: pointer of type atomic64_t
                                                   >> 444  *
                                                   >> 445  * Atomically decrements @v by 1 and
                                                   >> 446  * returns true if the result is 0, or false for all other
                                                   >> 447  * cases.
                                                   >> 448  */
                                                   >> 449 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
                                                   >> 450 
                                                   >> 451 /*
                                                   >> 452  * atomic_inc - increment atomic variable
                                                   >> 453  * @v: pointer of type atomic_t
                                                   >> 454  *
                                                   >> 455  * Atomically increments @v by 1.  Note that the guaranteed
                                                   >> 456  * useful range of an atomic_t is only 24 bits.
                                                   >> 457  */
                                                   >> 458 #define atomic_inc(v) atomic_add(1,(v))
                                                   >> 459 
                                                   >> 460 /*
                                                   >> 461  * atomic64_inc - increment atomic variable
                                                   >> 462  * @v: pointer of type atomic64_t
                                                   >> 463  *
                                                   >> 464  * Atomically increments @v by 1.
                                                   >> 465  */
                                                   >> 466 #define atomic64_inc(v) atomic64_add(1,(v))
                                                   >> 467 
                                                   >> 468 /*
                                                   >> 469  * atomic_dec - decrement and test
                                                   >> 470  * @v: pointer of type atomic_t
                                                   >> 471  *
                                                   >> 472  * Atomically decrements @v by 1.  Note that the guaranteed
                                                   >> 473  * useful range of an atomic_t is only 24 bits.
                                                   >> 474  */
                                                   >> 475 #define atomic_dec(v) atomic_sub(1,(v))
                                                   >> 476 
                                                   >> 477 /*
                                                   >> 478  * atomic64_dec - decrement and test
                                                   >> 479  * @v: pointer of type atomic64_t
                                                   >> 480  *
                                                   >> 481  * Atomically decrements @v by 1.
                                                   >> 482  */
                                                   >> 483 #define atomic64_dec(v) atomic64_sub(1,(v))
                                                   >> 484 
                                                   >> 485 /*
                                                   >> 486  * atomic_add_negative - add and test if negative
                                                   >> 487  * @v: pointer of type atomic_t
                                                   >> 488  * @i: integer value to add
                                                   >> 489  *
                                                   >> 490  * Atomically adds @i to @v and returns true
                                                   >> 491  * if the result is negative, or false when
                                                   >> 492  * result is greater than or equal to zero.  Note that the guaranteed
                                                   >> 493  * useful range of an atomic_t is only 24 bits.
                                                   >> 494  */
                                                   >> 495 #define atomic_add_negative(i,v) (atomic_add_return(i, (v)) < 0)
                                                   >> 496 
                                                   >> 497 /*
                                                   >> 498  * atomic64_add_negative - add and test if negative
                                                   >> 499  * @v: pointer of type atomic64_t
                                                   >> 500  * @i: integer value to add
                                                   >> 501  *
                                                   >> 502  * Atomically adds @i to @v and returns true
                                                   >> 503  * if the result is negative, or false when
                                                   >> 504  * result is greater than or equal to zero.
                                                   >> 505  */
                                                   >> 506 #define atomic64_add_negative(i,v) (atomic64_add_return(i, (v)) < 0)
 56                                                   507 
 57 #define ATOMIC_OP(op, c_op)                                              \   !! 508 /* Atomic operations are already serializing */
 58 static inline void generic_atomic_##op(int i, atomic_t *v)              \   !! 509 #define smp_mb__before_atomic_dec()     smp_mb()
 59 {                                                                        \   !! 510 #define smp_mb__after_atomic_dec()      smp_mb()
 60         unsigned long flags;                                             \   !! 511 #define smp_mb__before_atomic_inc()     smp_mb()
 61                                                                          \   !! 512 #define smp_mb__after_atomic_inc()      smp_mb()
 62         raw_local_irq_save(flags);                                       \   <<
 63         v->counter = v->counter c_op i;                                  \   <<
 64         raw_local_irq_restore(flags);                                    \   <<
 65 }                                              << 
 66                                                << 
 67 #define ATOMIC_OP_RETURN(op, c_op)                                       \   <<
 68 static inline int generic_atomic_##op##_return(int i, atomic_t *v)      \   <<
 69 {                                                                        \   <<
 70         unsigned long flags;                                             \   <<
 71         int ret;                                                         \   <<
 72                                                                          \   <<
 73         raw_local_irq_save(flags);                                       \   <<
 74         ret = (v->counter = v->counter c_op i);                          \   <<
 75         raw_local_irq_restore(flags);                                    \   <<
 76                                                                          \   <<
 77         return ret;                                                      \   <<
 78 }                                              << 
 79                                                << 
 80 #define ATOMIC_FETCH_OP(op, c_op)                                        \   <<
 81 static inline int generic_atomic_fetch_##op(int i, atomic_t *v)         \   <<
 82 {                                                                        \   <<
 83         unsigned long flags;                                             \   <<
 84         int ret;                                                         \   <<
 85                                                                          \   <<
 86         raw_local_irq_save(flags);                                       \   <<
 87         ret = v->counter;                                                \   <<
 88         v->counter = v->counter c_op i;                                  \   <<
 89         raw_local_irq_restore(flags);                                    \   <<
 90                                                                          \   <<
 91         return ret;                                                      \   <<
 92 }                                              << 
 93                                                << 
 94 #endif /* CONFIG_SMP */                        << 
 95                                                << 
 96 ATOMIC_OP_RETURN(add, +)                       << 
 97 ATOMIC_OP_RETURN(sub, -)                       << 
 98                                                << 
 99 ATOMIC_FETCH_OP(add, +)                        << 
100 ATOMIC_FETCH_OP(sub, -)                        << 
101 ATOMIC_FETCH_OP(and, &)                        << 
102 ATOMIC_FETCH_OP(or, |)                         << 
103 ATOMIC_FETCH_OP(xor, ^)                        << 
104                                                << 
105 ATOMIC_OP(add, +)                              << 
106 ATOMIC_OP(sub, -)                              << 
107 ATOMIC_OP(and, &)                              << 
108 ATOMIC_OP(or, |)                               << 
109 ATOMIC_OP(xor, ^)                              << 
110                                                << 
111 #undef ATOMIC_FETCH_OP                         << 
112 #undef ATOMIC_OP_RETURN                        << 
113 #undef ATOMIC_OP                               << 
114                                                << 
115 #define arch_atomic_add_return                  generic_atomic_add_return    <<
116 #define arch_atomic_sub_return                  generic_atomic_sub_return    <<
117                                                                              <<
118 #define arch_atomic_fetch_add                   generic_atomic_fetch_add     <<
119 #define arch_atomic_fetch_sub                   generic_atomic_fetch_sub     <<
120 #define arch_atomic_fetch_and                   generic_atomic_fetch_and     <<
121 #define arch_atomic_fetch_or                    generic_atomic_fetch_or      <<
122 #define arch_atomic_fetch_xor                   generic_atomic_fetch_xor     <<
123                                                                              <<
124 #define arch_atomic_add                         generic_atomic_add           <<
125 #define arch_atomic_sub                         generic_atomic_sub           <<
126 #define arch_atomic_and                         generic_atomic_and           <<
127 #define arch_atomic_or                          generic_atomic_or            <<
128 #define arch_atomic_xor                         generic_atomic_xor           <<
129                                                   513 
130 #define arch_atomic_read(v)                     READ_ONCE((v)->counter)      !! 514 #endif /* defined(__KERNEL__) */
131 #define arch_atomic_set(v, i)                   WRITE_ONCE(((v)->counter), (i)) <<
132                                                   515 
133 #endif /* __ASM_GENERIC_ATOMIC_H */            !! 516 #endif /* _ASM_ATOMIC_H */
134                                                   517 

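The generic header's CONFIG_SMP path (left column, lines 19-51) builds every atomic primitive out of arch_cmpxchg(): read the counter, compute the new value, and compare-and-swap it in, restarting from whatever value the CAS actually observed. Below is a minimal userspace sketch of that retry loop; GCC's __sync_val_compare_and_swap() stands in for the kernel's arch_cmpxchg(), an illustrative substitution only.

/* Sketch of the cmpxchg retry loop behind ATOMIC_OP_RETURN(add, +).
 * __sync_val_compare_and_swap() substitutes for arch_cmpxchg(), which
 * is not available outside the kernel. */
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;

static inline int generic_atomic_add_return(int i, atomic_t *v)
{
        int c, old;

        c = v->counter;
        /* A failed CAS returns the interfering value, so each retry
         * restarts from fresh state rather than re-reading. */
        while ((old = __sync_val_compare_and_swap(&v->counter, c, c + i)) != c)
                c = old;

        return c + i;
}

int main(void)
{
        atomic_t v = { 40 };

        printf("%d\n", generic_atomic_add_return(2, &v));   /* prints 42 */
        return 0;
}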

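The MIPS side (right column, lines 143-220) reaches the same result with ll/sc: ll loads the counter, sc stores the updated value only if nothing else wrote that location in between, and beqz retries on failure. C11's atomic_compare_exchange_weak(), which like sc is allowed to fail spuriously, is the closest portable analogue; the sketch below is that analogy, not the MIPS code.

/* C11 analogue of the ll/sc loop in atomic_add_return(): a weak
 * compare-exchange may fail spuriously, exactly like sc, so it sits
 * in the same retry loop as the beqz branch. */
#include <stdatomic.h>
#include <stdio.h>

static int llsc_style_add_return(int i, _Atomic int *v)
{
        int old = atomic_load_explicit(v, memory_order_relaxed);  /* "ll" */

        /* On failure, old is reloaded with the current value, and the
         * loop retries, like "beqz %0, 1b". */
        while (!atomic_compare_exchange_weak_explicit(v, &old, old + i,
                                                      memory_order_seq_cst,
                                                      memory_order_relaxed))
                ;

        return old + i;   /* the second "addu" recomputes the result */
}

int main(void)
{
        _Atomic int v = 5;

        printf("%d\n", llsc_style_add_return(3, &v));   /* prints 8 */
        return 0;
}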

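Where the hardware offers no primitive (the #ifndef CONFIG_CPU_HAS_LLSC block on the right, lines 61-126; the !CONFIG_SMP branch on the left, lines 53-92), both files fall back on the same idea: on a uniprocessor the only interleaving comes from interrupts, so a plain read-modify-write bracketed by local_irq_save()/local_irq_restore() (raw_local_irq_save()/raw_local_irq_restore() in the generic header) is atomic. A loose userspace analogy, with POSIX signal masking standing in for interrupt masking; this is an analogy only, not the kernel API.

/* Userspace analogy for the uniprocessor fallback: block all signals
 * around a plain read-modify-write, much as local_irq_save()/restore()
 * blocks interrupts around v->counter += i. */
#include <signal.h>
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;

static void up_atomic_add(int i, atomic_t *v)
{
        sigset_t all, old;

        sigfillset(&all);
        sigprocmask(SIG_BLOCK, &all, &old);     /* "local_irq_save(flags)" */
        v->counter += i;                        /* plain RMW, no signal handler
                                                   can run in between */
        sigprocmask(SIG_SETMASK, &old, NULL);   /* "local_irq_restore(flags)" */
}

int main(void)
{
        atomic_t v = { 0 };

        up_atomic_add(7, &v);
        printf("%d\n", v.counter);   /* prints 7 */
        return 0;
}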

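The derived operations that close out the MIPS file (lines 381-506: atomic_dec_and_test() and friends) exist mainly for reference counting, where the atomic read-modify-write guarantees that exactly one caller observes the transition to zero. A sketch of that usage pattern, assuming GCC's __sync_sub_and_fetch() in place of the atomic_sub_return() implementations above:

/* Refcounting with dec-and-test: only the caller that drops the count
 * to zero frees the object.  __sync_sub_and_fetch() stands in for
 * atomic_sub_return() (illustrative substitution). */
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;

#define atomic_sub_return(i, v) __sync_sub_and_fetch(&(v)->counter, (i))
#define atomic_dec_and_test(v)  (atomic_sub_return(1, (v)) == 0)

struct obj {
        atomic_t refcnt;
        /* payload would follow */
};

static void put_obj(struct obj *o)
{
        if (atomic_dec_and_test(&o->refcnt)) {
                printf("last reference dropped, freeing\n");
                free(o);
        }
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        o->refcnt.counter = 2;
        put_obj(o);   /* 2 -> 1, no free */
        put_obj(o);   /* 1 -> 0, freed here */
        return 0;
}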