
TOMOYO Linux Cross Reference
Linux/arch/loongarch/include/asm/barrier.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
 * Hint encoding:
 *
 * Bit4: ordering or completion (0: completion, 1: ordering)
 * Bit3: barrier for previous read (0: true, 1: false)
 * Bit2: barrier for previous write (0: true, 1: false)
 * Bit1: barrier for succeeding read (0: true, 1: false)
 * Bit0: barrier for succeeding write (0: true, 1: false)
 *
 * Hint 0x700: barrier for "read after read" from the same address
 */

#define DBAR(hint) __asm__ __volatile__("dbar %0 " : : "I"(hint) : "memory")

#define crwrw           0b00000
#define cr_r_           0b00101
#define c_w_w           0b01010

#define orwrw           0b10000
#define or_r_           0b10101
#define o_w_w           0b11010

#define orw_w           0b10010
#define or_rw           0b10100

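/*
 * Worked example (illustrative, not part of the upstream header): decoding
 * two of the hint names above against the bit encoding in the comment.
 *
 *   or_rw = 0b10100: Bit4=1 -> ordering; Bit3=0 -> previous reads ordered;
 *           Bit2=1 -> previous writes not ordered; Bit1=0 and Bit0=0 ->
 *           succeeding reads and writes ordered.  This is the acquire-style
 *           hint used by ldacq_mb() below.
 *
 *   cr_r_ = 0b00101: Bit4=0 -> completion; only previous reads (Bit3=0) and
 *           succeeding reads (Bit1=0) take the barrier, i.e. a read-read
 *           barrier, used by c_rsync()/rmb() below.
 */
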
#define c_sync()        DBAR(crwrw)
#define c_rsync()       DBAR(cr_r_)
#define c_wsync()       DBAR(c_w_w)

#define o_sync()        DBAR(orwrw)
#define o_rsync()       DBAR(or_r_)
#define o_wsync()       DBAR(o_w_w)

#define ldacq_mb()      DBAR(or_rw)
#define strel_mb()      DBAR(orw_w)

#define mb()            c_sync()
#define rmb()           c_rsync()
#define wmb()           c_wsync()
#define iob()           c_sync()
#define wbflush()       c_sync()

#define __smp_mb()      o_sync()
#define __smp_rmb()     o_rsync()
#define __smp_wmb()     o_wsync()

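/*
 * Illustrative sketch (assumed usage, not part of the upstream header):
 * asm-generic/barrier.h maps smp_wmb()/smp_rmb() onto the __smp_* macros
 * above, so a classic flag/data handoff on LoongArch becomes one ordering
 * "dbar" hint on each side:
 *
 *   // producer                          // consumer
 *   WRITE_ONCE(data, val);               while (!READ_ONCE(flag))
 *   smp_wmb();      // dbar o_w_w                ;
 *   WRITE_ONCE(flag, 1);                 smp_rmb();      // dbar or_r_
 *                                        val = READ_ONCE(data);
 */
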
#ifdef CONFIG_SMP
#define __WEAK_LLSC_MB          "       dbar 0x700      \n"
#else
#define __WEAK_LLSC_MB          "                       \n"
#endif
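
/*
 * Illustrative sketch (assumed usage, not part of the upstream header):
 * __WEAK_LLSC_MB is a string fragment meant to be string-concatenated into
 * inline-asm LL/SC sequences elsewhere in the arch code (e.g. the cmpxchg
 * implementation), where the "dbar 0x700" hint supplies the "read after
 * read from the same address" ordering described above:
 *
 *   __asm__ __volatile__(
 *   "1:     ll.w    %0, %1          \n"
 *   "       bne     %0, %z2, 2f     \n"
 *   "       move    $t0, %z3        \n"
 *   "       sc.w    $t0, %1         \n"
 *   "       beqz    $t0, 1b         \n"
 *   "2:                             \n"
 *   __WEAK_LLSC_MB
 *   : ... );
 */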

#define __smp_mb__before_atomic()       barrier()
#define __smp_mb__after_atomic()        barrier()

/**
 * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (@index < @size)
 */
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long index,
                                                    unsigned long size)
{
        unsigned long mask;

        __asm__ __volatile__(
                "sltu   %0, %1, %2\n\t"
#if (__SIZEOF_LONG__ == 4)
                "sub.w  %0, $zero, %0\n\t"
#elif (__SIZEOF_LONG__ == 8)
                "sub.d  %0, $zero, %0\n\t"
#endif
                : "=r" (mask)
                : "r" (index), "r" (size)
                :);

        return mask;
}

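/*
 * Illustrative sketch (assumed usage, not part of the upstream header):
 * the "sltu" above produces 1 when index < size, and the negate turns that
 * into an all-ones mask (0 - 1 == ~0UL); otherwise the mask stays 0.  The
 * generic array_index_nospec() helper in <linux/nospec.h> ANDs this mask
 * with the index, so an index speculated past a bounds check is clamped to
 * 0 instead of steering a dependent load:
 *
 *   if (idx < ARRAY_SIZE(table)) {
 *           idx = array_index_nospec(idx, ARRAY_SIZE(table));
 *           // idx & array_index_mask_nospec(idx, ARRAY_SIZE(table))
 *           val = table[idx];
 *   }
 */
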
#define __smp_load_acquire(p)                           \
({                                                      \
        typeof(*p) ___p1 = READ_ONCE(*p);               \
        compiletime_assert_atomic_type(*p);             \
        ldacq_mb();                                     \
        ___p1;                                          \
})

#define __smp_store_release(p, v)                       \
do {                                                    \
        compiletime_assert_atomic_type(*p);             \
        strel_mb();                                     \
        WRITE_ONCE(*p, v);                              \
} while (0)

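/*
 * Illustrative sketch (assumed usage, not part of the upstream header):
 * asm-generic/barrier.h exposes these as smp_load_acquire() and
 * smp_store_release(), so the usual message-passing pattern costs one
 * acquire/release "dbar" hint on each side:
 *
 *   // publisher                          // reader
 *   obj->payload = val;                   if (smp_load_acquire(&published))
 *   smp_store_release(&published, 1);             val = obj->payload;
 *   // dbar orw_w before the store        // dbar or_rw after the load
 */
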
#define __smp_store_mb(p, v)                                                    \
do {                                                                            \
        union { typeof(p) __val; char __c[1]; } __u =                           \
                { .__val = (__force typeof(p)) (v) };                           \
        unsigned long __tmp;                                                    \
        switch (sizeof(p)) {                                                    \
        case 1:                                                                 \
                *(volatile __u8 *)&p = *(__u8 *)__u.__c;                        \
                __smp_mb();                                                     \
                break;                                                          \
        case 2:                                                                 \
                *(volatile __u16 *)&p = *(__u16 *)__u.__c;                      \
                __smp_mb();                                                     \
                break;                                                          \
        case 4:                                                                 \
                __asm__ __volatile__(                                           \
                "amswap_db.w %[tmp], %[val], %[mem]     \n"                     \
                : [mem] "+ZB" (*(u32 *)&p), [tmp] "=&r" (__tmp)                 \
                : [val] "r" (*(__u32 *)__u.__c)                                 \
                : );                                                            \
                break;                                                          \
        case 8:                                                                 \
                __asm__ __volatile__(                                           \
                "amswap_db.d %[tmp], %[val], %[mem]     \n"                     \
                : [mem] "+ZB" (*(u64 *)&p), [tmp] "=&r" (__tmp)                 \
                : [val] "r" (*(__u64 *)__u.__c)                                 \
                : );                                                            \
                break;                                                          \
        }                                                                       \
} while (0)

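/*
 * Illustrative sketch (assumed usage, not part of the upstream header):
 * asm-generic/barrier.h wraps this as smp_store_mb(), a store immediately
 * followed by a full barrier.  For 4- and 8-byte values the "amswap_db"
 * atomic swap gives the store plus full-barrier semantics in one
 * instruction; smaller sizes fall back to a plain store and __smp_mb().
 * Hypothetical Dekker-style caller (names are illustrative):
 *
 *   smp_store_mb(my_flag, 1);            // store, then full barrier
 *   if (!READ_ONCE(other_flag))          // later read cannot pass it
 *           do_work();
 */
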
#include <asm-generic/barrier.h>

#endif /* __ASM_BARRIER_H */
