TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/barrier.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
                                      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
                                       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
                                       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define __mb()  asm volatile("mfence":::"memory")
#define __rmb() asm volatile("lfence":::"memory")
#define __wmb() asm volatile("sfence" ::: "memory")
#endif
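
/*
 * Illustrative sketch (not part of this header): a driver may use
 * wmb() to make descriptor stores visible before kicking the device;
 * 'desc' and 'doorbell' are hypothetical names, not a real kernel API.
 *
 *	desc->addr = buf_dma;
 *	desc->len  = len;
 *	wmb();			// descriptor writes reach memory first ...
 *	writel(1, doorbell);	// ... before the device is told to fetch it
 */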

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 *      bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (index < size), i.e. ~0UL when @index is in bounds, 0 when not
 */
#define array_index_mask_nospec(idx,sz) ({      \
        typeof((idx)+(sz)) __idx = (idx);       \
        typeof(__idx) __sz = (sz);              \
        unsigned long __mask;                   \
        asm volatile ("cmp %1,%2; sbb %0,%0"    \
                        :"=r" (__mask)          \
                        :ASM_INPUT_G (__sz),    \
                         "r" (__idx)            \
                        :"cc");                 \
        __mask; })
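
/*
 * Worked example (illustrative): with idx = 1 and sz = 4, "cmp %1,%2"
 * computes __idx - __sz = -3 and sets the carry flag, so "sbb %0,%0"
 * yields 0 - 1 = ~0UL; with idx = 5 there is no borrow and the mask
 * is 0.  This is the primitive behind array_index_nospec() in
 * <linux/nospec.h>, which clamps an index under speculation roughly as:
 *
 *	if (idx < sz)
 *		val = arr[idx & array_index_mask_nospec(idx, sz)];
 */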

/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative("", "lfence", X86_FEATURE_LFENCE_RDTSC)
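
/*
 * Sketched usage (assumed, not from this header): keep the CPU from
 * speculatively issuing loads past a failed bounds check, in the
 * style of the user-copy paths:
 *
 *	if (idx >= sz)
 *		return -EINVAL;
 *	barrier_nospec();	// no speculative use of a bad idx
 *	val = arr[idx];
 */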

#define __dma_rmb()     barrier()
#define __dma_wmb()     barrier()

#define __smp_mb()      asm volatile("lock; addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")

#define __smp_rmb()     dma_rmb()
#define __smp_wmb()     barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#define __smp_store_release(p, v)                                       \
do {                                                                    \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        WRITE_ONCE(*p, v);                                              \
} while (0)

#define __smp_load_acquire(p)                                           \
({                                                                      \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        ___p1;                                                          \
})
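
/*
 * Illustrative pairing (hypothetical 'data' and 'ready' variables): a
 * producer publishes with a release store and a consumer observes with
 * an acquire load; the generic smp_store_release()/smp_load_acquire()
 * wrappers in <asm-generic/barrier.h> map onto the __ variants above.
 *
 *	// producer:			// consumer:
 *	data = 42;			while (!smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);		cpu_relax();
 *					// 'data' is guaranteed to be 42 here
 */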

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()       do { } while (0)
#define __smp_mb__after_atomic()        do { } while (0)

/* Writing to CR3 provides a full memory barrier in switch_mm(). */
#define smp_mb__after_switch_mm()       do { } while (0)

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */

