
TOMOYO Linux Cross Reference
Linux/lib/atomic64.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS        16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
        raw_spinlock_t lock;
        char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
        [0 ... (NR_LOCKS - 1)] = {
                .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
        },
};

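/*
 * Hash the address of the atomic64_t down to one of the NR_LOCKS
 * spinlocks: discard the low L1_CACHE_SHIFT bits (so variables sharing
 * a cacheline share a lock), fold in higher address bits, and mask the
 * result to an array index.
 */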
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
        unsigned long addr = (unsigned long) v;

        addr >>= L1_CACHE_SHIFT;
        addr ^= (addr >> 8) ^ (addr >> 16);
        return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

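/*
 * On 32-bit machines a 64-bit load or store is not a single atomic
 * access and can tear, so even a plain read must take the lock.  The
 * irqsave variants are used throughout because atomic64_t operations
 * may be called from interrupt context.
 */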
s64 generic_atomic64_read(const atomic64_t *v)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
        s64 val;

        raw_spin_lock_irqsave(lock, flags);
        val = v->counter;
        raw_spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(generic_atomic64_read);

void generic_atomic64_set(atomic64_t *v, s64 i)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);

        raw_spin_lock_irqsave(lock, flags);
        v->counter = i;
        raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(generic_atomic64_set);

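/*
 * The arithmetic and bitwise operations all follow the same
 * lock/modify/unlock pattern, so they are generated from the three
 * templates below: ATOMIC64_OP (no return value), ATOMIC64_OP_RETURN
 * (returns the new value) and ATOMIC64_FETCH_OP (returns the old
 * value).
 */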
#define ATOMIC64_OP(op, c_op)                                           \
void generic_atomic64_##op(s64 a, atomic64_t *v)                        \
{                                                                       \
        unsigned long flags;                                            \
        raw_spinlock_t *lock = lock_addr(v);                            \
                                                                        \
        raw_spin_lock_irqsave(lock, flags);                             \
        v->counter c_op a;                                              \
        raw_spin_unlock_irqrestore(lock, flags);                        \
}                                                                       \
EXPORT_SYMBOL(generic_atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)                                    \
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v)                \
{                                                                       \
        unsigned long flags;                                            \
        raw_spinlock_t *lock = lock_addr(v);                            \
        s64 val;                                                        \
                                                                        \
        raw_spin_lock_irqsave(lock, flags);                             \
        val = (v->counter c_op a);                                      \
        raw_spin_unlock_irqrestore(lock, flags);                        \
        return val;                                                     \
}                                                                       \
EXPORT_SYMBOL(generic_atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op)                                     \
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v)                   \
{                                                                       \
        unsigned long flags;                                            \
        raw_spinlock_t *lock = lock_addr(v);                            \
        s64 val;                                                        \
                                                                        \
        raw_spin_lock_irqsave(lock, flags);                             \
        val = v->counter;                                               \
        v->counter c_op a;                                              \
        raw_spin_unlock_irqrestore(lock, flags);                        \
        return val;                                                     \
}                                                                       \
EXPORT_SYMBOL(generic_atomic64_fetch_##op);

#define ATOMIC64_OPS(op, c_op)                                          \
        ATOMIC64_OP(op, c_op)                                           \
        ATOMIC64_OP_RETURN(op, c_op)                                    \
        ATOMIC64_FETCH_OP(op, c_op)

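/*
 * For example, ATOMIC64_OPS(add, +=) expands to the three functions
 * generic_atomic64_add(), generic_atomic64_add_return() and
 * generic_atomic64_fetch_add().
 */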
ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

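/*
 * The bitwise operations are only generated in the void and fetch
 * forms; the kernel's atomic API does not define and/or/xor variants
 * that return the new value.
 */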
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)                                          \
        ATOMIC64_OP(op, c_op)                                           \
        ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

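/*
 * Decrement only if the result would stay non-negative.  The
 * (possibly unstored) decremented value is returned either way, so a
 * negative return value tells the caller no decrement took place.
 */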
s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
        s64 val;

        raw_spin_lock_irqsave(lock, flags);
        val = v->counter - 1;
        if (val >= 0)
                v->counter = val;
        raw_spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(generic_atomic64_dec_if_positive);

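/*
 * Set v to n only if it currently equals o.  The old value is returned
 * either way; the exchange succeeded iff the return value equals o.
 */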
s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
        s64 val;

        raw_spin_lock_irqsave(lock, flags);
        val = v->counter;
        if (val == o)
                v->counter = n;
        raw_spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(generic_atomic64_cmpxchg);

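/* Unconditionally replace the counter and return its previous value. */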
s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
        s64 val;

        raw_spin_lock_irqsave(lock, flags);
        val = v->counter;
        v->counter = new;
        raw_spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(generic_atomic64_xchg);

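/*
 * Add a to v unless the counter already equals u.  Returns the old
 * value, so callers can detect the "unless" case by comparing the
 * return value with u.
 */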
s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
        s64 val;

        raw_spin_lock_irqsave(lock, flags);
        val = v->counter;
        if (val != u)
                v->counter += a;
        raw_spin_unlock_irqrestore(lock, flags);

        return val;
}
EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);

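A minimal usage sketch (an illustration, not part of this file): on
architectures that select CONFIG_GENERIC_ATOMIC64, the asm-generic
atomic64 header routes the ordinary atomic64_* API onto the
generic_atomic64_* functions above, so callers never invoke them
directly. The event counter below is hypothetical:

        #include <linux/atomic.h>

        static atomic64_t nr_events = ATOMIC64_INIT(0);

        void record_event(void)
        {
                /* Ends up in generic_atomic64_add() on such arches. */
                atomic64_add(1, &nr_events);
        }

        s64 read_events(void)
        {
                /* Ends up in generic_atomic64_read(). */
                return atomic64_read(&nr_events);
        }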
