
TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/qspinlock.h


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>

#define _Q_PENDING_LOOPS        (1 << 9)

#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
        u32 val;

        /*
         * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
         * and CONFIG_PROFILE_ALL_BRANCHES=y result in a label inside a
         * statement expression, which GCC doesn't like.
         */
        val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
                               "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
        val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

        return val;
}
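
/*
 * A minimal sketch, assuming the generic qspinlock fallback semantics, of
 * what the function above amounts to: atomically set the pending bit with
 * acquire ordering and return the old lock word.
 *
 *	static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
 *	{
 *		return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
 *	}
 *
 * The x86 variant prefers LOCK BTS, so only the pending bit is part of the
 * atomic RMW; the remaining bits of the lock word are merged back in from a
 * plain atomic_read().
 */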

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
extern bool nopvspin;

#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
        smp_store_release(&lock->locked, 0);
}
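
/*
 * A sketch of the lock-word layout this byte store relies on, as laid out by
 * the generic qspinlock code (NR_CPUS < 16K case; the exact split may differ
 * by kernel version):
 *
 *	 0- 7: locked byte
 *	    8: pending
 *	 9-15: not used
 *	16-17: tail index
 *	18-31: tail cpu (+1)
 *
 * Releasing the lock only needs to clear the locked byte, hence the single
 * byte smp_store_release() above.
 */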

static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
        pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
        kcsan_release();
        pv_queued_spin_unlock(lock);
}

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
        return pv_vcpu_is_preempted(cpu);
}
#endif
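
/*
 * A rough sketch of how the pv_*() calls in the CONFIG_PARAVIRT_SPINLOCKS
 * block above resolve, assuming the usual pv_ops wiring (the call sites are
 * paravirt-patched, so this is not literal C):
 *
 *	pv_queued_spin_lock_slowpath()  -> native_queued_spin_lock_slowpath()
 *	                                   on bare metal, or
 *	                                   __pv_queued_spin_lock_slowpath()
 *	                                   when the hypervisor enables
 *	                                   paravirt spinlocks
 *
 *	pv_queued_spin_unlock()         -> native_queued_spin_unlock() or
 *	                                   __pv_queued_spin_unlock() on the
 *	                                   same basis
 */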

#ifdef CONFIG_PARAVIRT
/*
 * virt_spin_lock_key - controls the virt_spin_lock() hijack; disabled
 * (hijack off) by default.
 *
 * Native (and PV guests that want native behaviour due to vCPU pinning)
 * should keep this key disabled; native code never touches it.
 *
 * When running in a guest, native_pv_lock_init() enables the key first,
 * and KVM/Xen may conditionally disable it again later in the boot process.
 */
DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);

/*
 * Shortcut for the queued_spin_lock_slowpath() function that allows
 * virt to hijack it.
 *
 * Returns:
 *   true - lock has been negotiated, all done;
 *   false - queued_spin_lock_slowpath() will do its thing.
 */
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
        int val;

        if (!static_branch_likely(&virt_spin_lock_key))
                return false;

        /*
         * On hypervisors without PARAVIRT_SPINLOCKS support we fall
         * back to a Test-and-Set spinlock, because fair locks have
         * horrible lock 'holder' preemption issues.
         */

 __retry:
        val = atomic_read(&lock->val);

        if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
                cpu_relax();
                goto __retry;
        }

        return true;
}
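
/*
 * How the hijack takes effect, assuming the mainline generic slowpath:
 * queued_spin_lock_slowpath() in kernel/locking/qspinlock.c calls
 * virt_spin_lock() near its entry and returns as soon as it reports true,
 * so the MCS queueing machinery is bypassed in favour of the simple
 * test-and-set loop above.
 */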

#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */
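
For context, the definitions above plug into the generic fast path from
<asm-generic/qspinlock.h>, included at the end of this header. A rough,
version-dependent sketch of that fast path (exact form may differ between
kernel releases): a single try-cmpxchg of the whole lock word, with the
slowpath declared here used only on contention.

static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
        int val = 0;

        /* Uncontended case: 0 -> _Q_LOCKED_VAL in one acquire cmpxchg. */
        if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
                return;

        /* Contended: fall back to the (possibly paravirt) slowpath. */
        queued_spin_lock_slowpath(lock, val);
}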
