TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/sync_core.h


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SYNC_CORE_H
#define _ASM_X86_SYNC_CORE_H

#include <linux/preempt.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

#ifdef CONFIG_X86_32
static inline void iret_to_self(void)
{
        asm volatile (
                "pushfl\n\t"
                "pushl %%cs\n\t"
                "pushl $1f\n\t"
                "iret\n\t"
                "1:"
                : ASM_CALL_CONSTRAINT : : "memory");
}
#else
static inline void iret_to_self(void)
{
        unsigned int tmp;

        asm volatile (
                "mov %%ss, %0\n\t"
                "pushq %q0\n\t"
                "pushq %%rsp\n\t"
                "addq $8, (%%rsp)\n\t"
                "pushfq\n\t"
                "mov %%cs, %0\n\t"
                "pushq %q0\n\t"
                "pushq $1f\n\t"
                "iretq\n\t"
                "1:"
                : "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
}
#endif /* CONFIG_X86_32 */
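
/*
 * Both variants "return" to the local label 1: by hand-building the
 * frame that IRET pops.  On 32-bit that frame is EFLAGS/CS/EIP; on
 * 64-bit, IRETQ always pops five words, pushed above from the top of
 * the frame down:
 *
 *      SS      saved %ss
 *      RSP     the pushed copy points at the SS slot, so the
 *              "addq $8" fixes it up to the pre-frame stack pointer
 *      RFLAGS
 *      CS      the current %cs, since a hardcoded __KERNEL_CS may be
 *              invalid under paravirtualization
 *      RIP     the 1: label, i.e. the instruction after iretq
 */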

/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 *  a) Text was modified using one virtual address and is about to be executed
 *     from the same physical page at a different virtual address.
 *
 *  b) Text was modified on a different CPU, may subsequently be
 *     executed on this CPU, and you want to make sure the new version
 *     gets executed.  This generally means you're calling this in an IPI.
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 *
 * Like all of Linux's memory ordering operations, this is a
 * compiler barrier as well.
 */
static inline void sync_core(void)
{
        /*
         * The SERIALIZE instruction is the most straightforward way to
         * do this, but it is not universally available.
         */
        if (static_cpu_has(X86_FEATURE_SERIALIZE)) {
                serialize();
                return;
        }

        /*
         * For all other processors, there are quite a few ways to do this.
         * IRET-to-self is nice because it works on every CPU, at any CPL
         * (so it's compatible with paravirtualization), and it never exits
         * to a hypervisor.  The only downsides are that it's a bit slow
         * (it seems to be a bit more than 2x slower than the fastest
         * options) and that it unmasks NMIs.  The "push %cs" is needed,
         * because in paravirtual environments __KERNEL_CS may not be a
         * valid CS value when we do IRET directly.
         *
         * In case NMI unmasking or performance ever becomes a problem,
         * the next best option appears to be MOV-to-CR2 and an
         * unconditional jump.  That sequence also works on all CPUs,
         * but it will fault at CPL3 (i.e. Xen PV).
         *
         * CPUID is the conventional way, but it's nasty: it doesn't
         * exist on some 486-like CPUs, and it usually exits to a
         * hypervisor.
         */
        iret_to_self();
}
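
/*
 * A minimal usage sketch for case (b) above, modeled on the
 * do_sync_core()/text_poke_sync() pair in arch/x86/kernel/alternative.c.
 * The _example names are hypothetical, and on_each_cpu() from
 * <linux/smp.h> is assumed to be available.
 */
static inline void do_sync_core_example(void *info)
{
        /* Runs in IPI context on every CPU. */
        sync_core();
}

static inline void text_poke_sync_example(void)
{
        /* wait=1: return only after all CPUs have run the callback. */
        on_each_cpu(do_sync_core_example, NULL, 1);
}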

/*
 * Ensure that a core serializing instruction is issued before returning
 * to user-mode. x86 implements return to user-space through sysexit,
 * sysretl, and sysretq, which are not core serializing.
 */
static inline void sync_core_before_usermode(void)
{
        /* With PTI, we unconditionally serialize before running user code. */
        if (static_cpu_has(X86_FEATURE_PTI))
                return;

        /*
         * Even if we're in an interrupt, we might reschedule before returning,
         * in which case we could switch to a different thread in the same mm
         * and return using SYSRET or SYSEXIT.  Instead of trying to keep
         * track of our need to sync the core, just sync right away.
         */
        sync_core();
}
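
/*
 * An illustrative sketch of one caller: membarrier(2) with
 * MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE IPIs every CPU running
 * the caller's mm and runs a handler like this one, modeled on
 * ipi_sync_core() in kernel/sched/membarrier.c.  The _example name is
 * hypothetical, and smp_mb() is assumed from <asm/barrier.h>.
 */
static inline void ipi_sync_core_example(void *info)
{
        /* The IPI itself should be serializing, but be explicit. */
        smp_mb();
        sync_core_before_usermode();
}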

#endif /* _ASM_X86_SYNC_CORE_H */