TOMOYO Linux Cross Reference
Linux/include/asm-generic/qspinlock.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * A 'generic' spinlock implementation that is based on MCS locks. For an
 * architecture that's looking for a 'generic' spinlock, please first consider
 * ticket-lock.h and only come looking here when you've considered all the
 * constraints below and can show your hardware does actually perform better
 * with qspinlock.
 *
 * qspinlock relies on atomic_*_release()/atomic_*_acquire() to be RCsc (or no
 * weaker than RCtso if you're Power), where regular code only expects atomic_t
 * to be RCpc.
 *
 * qspinlock relies on a far greater (compared to asm-generic/spinlock.h) set
 * of atomic operations to behave well together; please audit them carefully
 * to ensure they all have forward progress. Many atomic operations may default
 * to cmpxchg() loops which will not have good forward progress properties on
 * LL/SC architectures.
 *
 * One notable example is atomic_fetch_or_acquire(), which x86 cannot (cheaply)
 * do. Carefully read the patches that introduced
 * queued_fetch_set_pending_acquire().
 *
 * qspinlock also relies heavily on mixed-size atomic operations; specifically,
 * it requires architectures to have xchg16, something which many LL/SC
 * architectures need to implement as a 32-bit and+or in order to satisfy the
 * forward progress guarantees mentioned above (a rough sketch of such an
 * emulation follows the includes below).
 *
 * Further reading on mixed size atomics that might be relevant:
 *
 *   http://www.cl.cam.ac.uk/~pes20/popl17/mixed-size.pdf
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>

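/*
 * Illustrative sketch only (not part of the upstream header): one way an
 * LL/SC architecture without a native 16-bit xchg could emulate it with a
 * 32-bit cmpxchg() and+or loop, as described in the comment above. The
 * function and parameter names below are hypothetical; real ports do this in
 * architecture code and must also ensure the loop makes forward progress.
 */
#if 0
static inline u16 example_emulated_xchg16(u32 *word, unsigned int shift, u16 new)
{
        u32 mask = 0xffffU << shift;
        u32 old, tmp;

        do {
                old = READ_ONCE(*word);
                /* and out the old halfword, or in the new one */
                tmp = (old & ~mask) | ((u32)new << shift);
        } while (cmpxchg(word, old, tmp) != old);

        return (u16)((old & mask) >> shift);
}
#endif
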
#ifndef queued_spin_is_locked
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
        /*
         * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
         * isn't immediately observable.
         */
        return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code, to prevent the lockref code from stealing
 *      the lock and changing things underneath it. This also allows some
 *      optimizations to be applied without conflicting with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
        return !lock.val.counter;
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
        return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
        int val = atomic_read(&lock->val);

        if (unlikely(val))
                return 0;

        return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}

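/*
 * Contended path, implemented in kernel/locking/qspinlock.c: it takes over
 * when the uncontended fastpath cmpxchg in queued_spin_lock() fails; @val is
 * the lock value observed by that failed attempt.
 */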
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

#ifndef queued_spin_lock
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
        int val = 0;

        if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
                return;

        queued_spin_lock_slowpath(lock, val);
}
#endif

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
        /*
         * unlock() needs release semantics:
         */
        smp_store_release(&lock->locked, 0);
}
#endif
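
/*
 * Illustrative sketch only (an assumption, not part of the upstream header):
 * the #ifndef guards above let an architecture supply its own fastpath by
 * defining both the function and the matching macro before this header is
 * included, typically from its asm/qspinlock.h, roughly along these lines
 * (x86 does something similar for its paravirt-aware unlock):
 */
#if 0
#define queued_spin_unlock queued_spin_unlock
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
        /* arch-tuned release store of the locked byte */
        smp_store_release(&lock->locked, 0);
}
#endif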
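/*
 * virt_spin_lock() is a hook for virtualized guests; the generic version
 * below returns false, meaning "not handled, take the normal qspinlock
 * path". An architecture (e.g. x86) can override it to fall back to a
 * simpler test-and-set lock when queueing behaves poorly under a hypervisor.
 */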
#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
        return false;
}
#endif

/*
 * Remap the architecture-specific spinlock API to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)          queued_spin_is_locked(l)
#define arch_spin_is_contended(l)       queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)     queued_spin_value_unlocked(l)
#define arch_spin_lock(l)               queued_spin_lock(l)
#define arch_spin_trylock(l)            queued_spin_trylock(l)
#define arch_spin_unlock(l)             queued_spin_unlock(l)

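/*
 * Illustrative sketch only (an assumption about typical usage, not taken
 * from this file): an architecture that selects ARCH_USE_QUEUED_SPINLOCKS
 * usually wires the above up with a small asm/spinlock.h of roughly this
 * shape, with its asm/qspinlock.h in turn including this asm-generic header:
 */
#if 0
/* arch/<arch>/include/asm/spinlock.h (hypothetical minimal version) */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/qspinlock.h>
#include <asm/qrwlock.h>

#endif /* __ASM_SPINLOCK_H */
#endif
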
#endif /* __ASM_GENERIC_QSPINLOCK_H */
