~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/tools/arch/arm64/include/asm/barrier.h

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /* SPDX-License-Identifier: GPL-2.0 */
  2 #ifndef _TOOLS_LINUX_ASM_AARCH64_BARRIER_H
  3 #define _TOOLS_LINUX_ASM_AARCH64_BARRIER_H
  4 
  5 /*
  6  * From tools/perf/perf-sys.h, last modified in:
  7  * f428ebd184c82a7914b2aa7e9f868918aaf7ea78 perf tools: Fix AAAAARGH64 memory barriers
  8  *
  9  * XXX: arch/arm64/include/asm/barrier.h in the kernel sources use dsb, is this
 10  * a case like for arm32 where we do things differently in userspace?
 11  */
 12 
/*
 * General memory barriers for userspace, using the inner-shareable (ish)
 * domain: ordering is enforced as observed by all CPUs in the system,
 * which is what SMP userspace needs (device/completion ordering, where
 * dsb would be required, is not a concern here — see the XXX above).
 */
#define mb()            asm volatile("dmb ish" ::: "memory")    /* full: orders loads and stores */
#define wmb()           asm volatile("dmb ishst" ::: "memory")  /* store-store ordering only */
#define rmb()           asm volatile("dmb ishld" ::: "memory")  /* load-load/load-store ordering */
 16 
 17 /*
 18  * Kernel uses dmb variants on arm64 for smp_*() barriers. Pretty much the same
 19  * implementation as above mb()/wmb()/rmb(), though for the latter kernel uses
 20  * dsb. In any case, should above mb()/wmb()/rmb() change, make sure the below
 21  * smp_*() don't.
 22  */
/* SMP variants: identical expansions to mb()/wmb()/rmb() above, kept as
 * separate definitions on purpose (see comment) so the two sets can
 * diverge independently if the plain barriers ever change. */
#define smp_mb()        asm volatile("dmb ish" ::: "memory")
#define smp_wmb()       asm volatile("dmb ishst" ::: "memory")
#define smp_rmb()       asm volatile("dmb ishld" ::: "memory")
 26 
/*
 * smp_store_release(p, v): store v to *p with release semantics via the
 * size-matched STLR instruction: all memory accesses program-ordered
 * before the store become visible to other CPUs before the store itself.
 *
 * The union type-puns v into a raw byte buffer so the asm can read it
 * back through a correctly sized integer type (__uN_alias_t — presumably
 * typedef'd in tools' compiler.h, not visible in this file) without a
 * strict-aliasing violation, regardless of the actual type of *p.
 *
 * "=Q" makes *p a direct memory operand (register-indirect address, no
 * offset), which is the only addressing mode stlr accepts; the "memory"
 * clobber stops the compiler reordering other accesses across the store.
 */
#define smp_store_release(p, v)                                         \
do {                                                                    \
        union { typeof(*p) __val; char __c[1]; } __u =                  \
                { .__val = (v) };                                       \
                                                                        \
        switch (sizeof(*p)) {                                           \
        case 1:                                                         \
                asm volatile ("stlrb %w1, %0"                           \
                                : "=Q" (*p)                             \
                                : "r" (*(__u8_alias_t *)__u.__c)        \
                                : "memory");                            \
                break;                                                  \
        case 2:                                                         \
                asm volatile ("stlrh %w1, %0"                           \
                                : "=Q" (*p)                             \
                                : "r" (*(__u16_alias_t *)__u.__c)       \
                                : "memory");                            \
                break;                                                  \
        case 4:                                                         \
                asm volatile ("stlr %w1, %0"                            \
                                : "=Q" (*p)                             \
                                : "r" (*(__u32_alias_t *)__u.__c)       \
                                : "memory");                            \
                break;                                                  \
        case 8:                                                         \
                asm volatile ("stlr %1, %0"                             \
                                : "=Q" (*p)                             \
                                : "r" (*(__u64_alias_t *)__u.__c)       \
                                : "memory");                            \
                break;                                                  \
        default:                                                        \
                /* Only to shut up gcc ... */                           \
                mb();                                                   \
                break;                                                  \
        }                                                               \
} while (0)
 63 
/*
 * smp_load_acquire(p): load *p with acquire semantics via the
 * size-matched LDAR instruction: no memory access program-ordered after
 * the load can be observed before it by other CPUs.
 *
 * The loaded bytes land in the union through a correctly sized integer
 * alias type (same strict-aliasing trick as smp_store_release() above),
 * then the statement expression yields __u.__val with the original type
 * of *p.  "Q" passes *p as a direct memory operand, matching ldar's
 * register-indirect-only addressing; "memory" prevents the compiler
 * hoisting later accesses above the load.
 */
#define smp_load_acquire(p)                                             \
({                                                                      \
        union { typeof(*p) __val; char __c[1]; } __u =                  \
                { .__c = { 0 } };                                       \
                                                                        \
        switch (sizeof(*p)) {                                           \
        case 1:                                                         \
                asm volatile ("ldarb %w0, %1"                           \
                        : "=r" (*(__u8_alias_t *)__u.__c)               \
                        : "Q" (*p) : "memory");                         \
                break;                                                  \
        case 2:                                                         \
                asm volatile ("ldarh %w0, %1"                           \
                        : "=r" (*(__u16_alias_t *)__u.__c)              \
                        : "Q" (*p) : "memory");                         \
                break;                                                  \
        case 4:                                                         \
                asm volatile ("ldar %w0, %1"                            \
                        : "=r" (*(__u32_alias_t *)__u.__c)              \
                        : "Q" (*p) : "memory");                         \
                break;                                                  \
        case 8:                                                         \
                asm volatile ("ldar %0, %1"                             \
                        : "=r" (*(__u64_alias_t *)__u.__c)              \
                        : "Q" (*p) : "memory");                         \
                break;                                                  \
        default:                                                        \
                /* Only to shut up gcc ... */                           \
                mb();                                                   \
                break;                                                  \
        }                                                               \
        __u.__val;                                                      \
})
 97 
 98 #endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */
 99 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php