
TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/qspinlock_paravirt.h


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_QSPINLOCK_PARAVIRT_H
#define __ASM_QSPINLOCK_PARAVIRT_H

#include <asm/ibt.h>

void __lockfunc __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked);

/*
 * For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
 * registers. For i386, however, only 1 32-bit register needs to be saved
 * and restored. So an optimized version of __pv_queued_spin_unlock() is
 * hand-coded for 64-bit, but it isn't worthwhile to do it for 32-bit.
 */
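/*
 * Rough sketch (not the exact expansion, which also includes frame and
 * IBT bookkeeping) of the thunk that __PV_CALLEE_SAVE_REGS_THUNK() below
 * generates on x86-64: the eight caller-saved GPRs other than %rax are
 * preserved around the C call, so paravirt call sites only see %rax
 * clobbered:
 *
 *   __raw_callee_save___pv_queued_spin_unlock_slowpath:
 *           push %rcx; push %rdx; push %rsi; push %rdi
 *           push %r8;  push %r9;  push %r10; push %r11
 *           call __pv_queued_spin_unlock_slowpath
 *           pop  %r11; pop  %r10; pop  %r9;  pop  %r8
 *           pop  %rdi; pop  %rsi; pop  %rdx; pop  %rcx
 *           ret
 */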
#ifdef CONFIG_64BIT

__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
#define __pv_queued_spin_unlock __pv_queued_spin_unlock

/*
 * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
 * which combines the register-saving thunk and the body of the following
 * C code.  Note that it puts the code in the .spinlock.text section which
 * is equivalent to adding __lockfunc in the C code:
 *
 * void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock)
 * {
 *      u8 lockval = _Q_LOCKED_VAL;
 *
 *      if (try_cmpxchg(&lock->locked, &lockval, 0))
 *              return;
 *      pv_queued_spin_unlock_slowpath(lock, lockval);
 * }
 *
 * For x86-64,
 *   rdi = lock              (first argument)
 *   rsi = lockval           (second argument)
 *   rdx = internal variable (set to 0)
 */
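/*
 * Note on the push/pop pairs below: in the PV callee-save convention the
 * call sites assume that only %rax is clobbered, so this hand-written body
 * must preserve every other register it touches.  %rdx is zeroed as the
 * "new" value for the cmpxchg on the fast path, and %esi is overwritten
 * with the observed lock byte on the slow path, hence the save/restore of
 * those two.  (A sketch of the reasoning, assuming the usual PVOP_CALLEE
 * clobber list; see PV_CALLEE_SAVE_REGS_THUNK in <asm/paravirt.h>.)
 */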
#define PV_UNLOCK_ASM                                                   \
        FRAME_BEGIN                                                     \
        "push  %rdx\n\t"                                                \
        "mov   $" __stringify(_Q_LOCKED_VAL) ",%eax\n\t"                \
        "xor   %edx,%edx\n\t"                                           \
        LOCK_PREFIX "cmpxchg %dl,(%rdi)\n\t"                            \
        "jne   .slowpath\n\t"                                           \
        "pop   %rdx\n\t"                                                \
        FRAME_END                                                       \
        ASM_RET                                                         \
        ".slowpath:\n\t"                                                \
        "push   %rsi\n\t"                                               \
        "movzbl %al,%esi\n\t"                                           \
        "call __raw_callee_save___pv_queued_spin_unlock_slowpath\n\t"   \
        "pop    %rsi\n\t"                                               \
        "pop    %rdx\n\t"                                               \
        FRAME_END

DEFINE_ASM_FUNC(__raw_callee_save___pv_queued_spin_unlock,
                PV_UNLOCK_ASM, .spinlock.text);

#else /* CONFIG_64BIT */

extern void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock);
__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock, ".spinlock.text");

#endif /* CONFIG_64BIT */
#endif
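As a self-contained illustration of the fast-path/slow-path split documented in the comment above, here is a minimal userspace sketch built on the GCC/Clang __atomic builtins. It is not kernel code: the struct layout, the pv_unlock()/pv_unlock_slowpath() names and the printf stand-in for the real slow path are hypothetical, assuming only that the locked byte holds _Q_LOCKED_VAL (1) while the lock is held and some other value once a waiter has "hashed" the lock.

/* Userspace sketch of the unlock fast path / slow path split (illustrative only). */
#include <stdint.h>
#include <stdio.h>

#define _Q_LOCKED_VAL   1

struct qspinlock {
        uint8_t locked;                 /* stand-in for the kernel's locked byte */
};

/* Stand-in for __pv_queued_spin_unlock_slowpath(): would kick the sleeping vCPU. */
static void pv_unlock_slowpath(struct qspinlock *lock, uint8_t locked)
{
        printf("slow path: locked byte was 0x%x\n", locked);
        __atomic_store_n(&lock->locked, 0, __ATOMIC_RELEASE);
}

static void pv_unlock(struct qspinlock *lock)
{
        uint8_t lockval = _Q_LOCKED_VAL;

        /* Fast path: byte still _Q_LOCKED_VAL, clear it and return (the cmpxchg above). */
        if (__atomic_compare_exchange_n(&lock->locked, &lockval, 0, 0,
                                        __ATOMIC_RELEASE, __ATOMIC_RELAXED))
                return;

        /* The byte was changed by a waiter: take the slow path with the value we saw. */
        pv_unlock_slowpath(lock, lockval);
}

int main(void)
{
        struct qspinlock lock = { .locked = _Q_LOCKED_VAL };

        pv_unlock(&lock);               /* takes the fast path */
        return 0;
}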
