TOMOYO Linux Cross Reference
Linux/arch/arc/include/asm/atomic-spinlock.h


/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef _ASM_ARC_ATOMIC_SPLOCK_H
#define _ASM_ARC_ATOMIC_SPLOCK_H

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */
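
As the comment above notes, the locking primitive changes with the configuration. The atomic_ops_lock()/atomic_ops_unlock() helpers used throughout this header are not defined here; on ARC they are provided elsewhere (arch/arc/include/asm/smp.h). A rough sketch of how they are typically defined, not the verbatim kernel source:

#ifdef CONFIG_SMP
/* SMP: a single global spinlock, taken with local IRQs disabled */
extern arch_spinlock_t smp_atomic_ops_lock;

#define atomic_ops_lock(flags)	do {			\
	local_irq_save(flags);				\
	arch_spin_lock(&smp_atomic_ops_lock);		\
} while (0)

#define atomic_ops_unlock(flags)	do {		\
	arch_spin_unlock(&smp_atomic_ops_lock);		\
	local_irq_restore(flags);			\
} while (0)
#else
/* UP: disabling interrupts on the local CPU is sufficient */
#define atomic_ops_lock(flags)		local_irq_save(flags)
#define atomic_ops_unlock(flags)	local_irq_restore(flags)
#endif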

static inline void arch_atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))
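
To make the comment in arch_atomic_set() concrete, the following illustrative interleaving (not part of the header) shows how a store that skipped the lock could clobber an emulated read-modify-write in progress on another CPU:

/*
 * Illustrative only: suppose arch_atomic_set() did a bare store while
 * another CPU runs an emulated arch_atomic_add(1, v) under the lock:
 *
 *	CPU0 (lockless set)		CPU1 (emulated add, holds the lock)
 *	-------------------		-----------------------------------
 *					temp = v->counter;	// reads 0
 *	v->counter = 10;		temp += 1;
 *					v->counter = temp;	// writes 1, the 10 is lost
 *
 * Taking atomic_ops_lock() in arch_atomic_set() serializes the store against
 * the whole emulated sequence, so it can never land in the middle of it.
 */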

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}
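
For illustration, instantiating this template as ATOMIC_OP(add, +=, add) (done via ATOMIC_OPS() further down) expands to roughly the function below. Note that the asm_op parameter is never referenced by these spinlock-based bodies; only op and c_op matter here:

static inline void arch_atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter += i;	/* the "+=" is substituted from c_op */
	atomic_ops_unlock(flags);
}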

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	unsigned int temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}
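
Similarly, ATOMIC_OP_RETURN(add, +=, add) generates a variant that hands back the updated value, roughly:

static inline int arch_atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	unsigned int temp;

	atomic_ops_lock(flags);		/* lock/unlock supply the full ordering */
	temp = v->counter;
	temp += i;
	v->counter = temp;
	atomic_ops_unlock(flags);

	return temp;			/* new value, after the operation */
}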

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned int orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}
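
The fetch variants differ from the _return variants only in what they return: the value the counter held before the operation. For instance, ATOMIC_FETCH_OP(add, +=, add) expands to roughly:

static inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	unsigned long flags;
	unsigned int orig;

	atomic_ops_lock(flags);
	orig = v->counter;	/* snapshot the old value */
	v->counter += i;
	atomic_ops_unlock(flags);

	return orig;		/* value before the operation */
}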

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define arch_atomic_fetch_add		arch_atomic_fetch_add
#define arch_atomic_fetch_sub		arch_atomic_fetch_sub
#define arch_atomic_add_return		arch_atomic_add_return
#define arch_atomic_sub_return		arch_atomic_sub_return

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
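
One instantiation worth spelling out is ATOMIC_OPS(andnot, &= ~, bic): because c_op is the two-token sequence "&= ~", the generated body clears the bits passed in rather than and-ing them, roughly:

static inline void arch_atomic_andnot(int i, atomic_t *v)
{
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter &= ~i;	/* clear the bits that are set in i */
	atomic_ops_unlock(flags);
}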

#define arch_atomic_andnot		arch_atomic_andnot

#define arch_atomic_fetch_and		arch_atomic_fetch_and
#define arch_atomic_fetch_andnot	arch_atomic_fetch_andnot
#define arch_atomic_fetch_or		arch_atomic_fetch_or
#define arch_atomic_fetch_xor		arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif
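
Kernel code does not call these arch_atomic_*() helpers directly; it goes through the atomic_*() wrappers in <linux/atomic.h>, which are backed by the definitions above. Purely as an illustration of the semantics, a hypothetical caller could look like this:

/* Hypothetical usage, for illustration only */
static atomic_t refs = ATOMIC_INIT(0);

static void example(void)
{
	int old;

	arch_atomic_set(&refs, 1);			/* refs = 1 */
	arch_atomic_add(2, &refs);			/* refs = 3 */
	old = arch_atomic_fetch_or(0x4, &refs);		/* old = 3, refs = 7 */
	arch_atomic_andnot(0x1, &refs);			/* refs = 6 */
}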
