TOMOYO Linux Cross Reference
Linux/arch/riscv/include/asm/barrier.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2013 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#ifndef _ASM_RISCV_BARRIER_H
#define _ASM_RISCV_BARRIER_H

#ifndef __ASSEMBLY__
#include <asm/cmpxchg.h>
#include <asm/fence.h>

#define nop()           __asm__ __volatile__ ("nop")
#define __nops(n)       ".rept  " #n "\nnop\n.endr\n"
#define nops(n)         __asm__ __volatile__ (__nops(n))
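
/*
 * Illustrative sketch (not part of the original header): because
 * __nops(n) stringizes n into an assembler ".rept" block, nops(4)
 * expands to inline asm emitting four consecutive nop instructions.
 * The function name below is hypothetical.
 */
static inline void example_pad_four_nops(void)
{
	nops(4);	/* .rept  4 / nop / .endr */
}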

/* These barriers need to enforce ordering on both devices and memory. */
#define __mb()          RISCV_FENCE(iorw, iorw)
#define __rmb()         RISCV_FENCE(ir, ir)
#define __wmb()         RISCV_FENCE(ow, ow)

/* These barriers do not need to enforce ordering on devices, just memory. */
#define __smp_mb()      RISCV_FENCE(rw, rw)
#define __smp_rmb()     RISCV_FENCE(r, r)
#define __smp_wmb()     RISCV_FENCE(w, w)
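
/*
 * Illustrative note (not part of the original header), assuming the
 * RISCV_FENCE() helper from <asm/fence.h> expands to a single
 * "fence predecessor, successor" instruction:
 *
 *	__mb()      -> fence iorw, iorw   (full barrier, device I/O + memory)
 *	__rmb()     -> fence ir, ir       (reads, including device input)
 *	__wmb()     -> fence ow, ow       (writes, including device output)
 *	__smp_mb()  -> fence rw, rw       (memory-only full barrier)
 *	__smp_rmb() -> fence r, r         (memory reads only)
 *	__smp_wmb() -> fence w, w         (memory writes only)
 */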

/*
 * This is a very specific barrier: it's currently only used in two places in
 * the kernel, both in the scheduler.  See include/linux/spinlock.h for the two
 * orderings it guarantees, but the "critical section is RCsc" guarantee
 * mandates a barrier on RISC-V.  The sequence looks like:
 *
 *    lr.aq lock
 *    sc    lock <= LOCKED
 *    smp_mb__after_spinlock()
 *    // critical section
 *    lr    lock
 *    sc.rl lock <= UNLOCKED
 *
 * The AQ/RL pair provides an RCpc critical section, but there's not really any
 * way we can take advantage of that here because the ordering is only enforced
 * on that one lock.  Thus, we're just doing a full fence.
 *
 * Since we allow writeX to be called from preemptive regions we need at least
 * an "o" in the predecessor set to ensure device writes are visible before the
 * task is marked as available for scheduling on a new hart.  While I don't see
 * any concrete reason we need a full IO fence, it seems safer to just upgrade
 * this in order to avoid any IO crossing a scheduling boundary.  In both
 * instances the scheduler pairs this with an mb(), so nothing is necessary on
 * the new hart.
 */
#define smp_mb__after_spinlock()        RISCV_FENCE(iorw, iorw)
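
/*
 * Illustrative sketch (not part of the original header): callers pair
 * this with a lock acquisition, roughly
 *
 *	raw_spin_lock(&lock);
 *	smp_mb__after_spinlock();
 *	... critical section ...
 *
 * so that the acquisition behaves as a full (RCsc) barrier rather than
 * the weaker RCpc ordering that lr.aq/sc.rl alone would provide.
 */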

#define __smp_store_release(p, v)                                       \
do {                                                                    \
        compiletime_assert_atomic_type(*p);                             \
        RISCV_FENCE(rw, w);                                             \
        WRITE_ONCE(*p, v);                                              \
} while (0)

#define __smp_load_acquire(p)                                           \
({                                                                      \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
        RISCV_FENCE(r, rw);                                             \
        ___p1;                                                          \
})
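
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * producer/consumer pair built on the release/acquire helpers exposed
 * through <asm-generic/barrier.h>.  The release fence (rw, w) orders the
 * payload write before the flag store; the acquire fence (r, rw) orders
 * the flag load before the payload read.  Function names are made up.
 */
static inline void example_publish(int *data, int *flag)
{
	*data = 42;			/* payload write */
	smp_store_release(flag, 1);	/* fence rw, w; then store flag */
}

static inline int example_consume(int *data, int *flag)
{
	while (!smp_load_acquire(flag))	/* load flag; then fence r, rw */
		;			/* a real caller would cpu_relax() */
	return *data;			/* observes the payload write */
}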

#ifdef CONFIG_RISCV_ISA_ZAWRS
#define smp_cond_load_relaxed(ptr, cond_expr) ({                        \
        typeof(ptr) __PTR = (ptr);                                      \
        __unqual_scalar_typeof(*ptr) VAL;                               \
        for (;;) {                                                      \
                VAL = READ_ONCE(*__PTR);                                \
                if (cond_expr)                                          \
                        break;                                          \
                __cmpwait_relaxed(ptr, VAL);                            \
        }                                                               \
        (typeof(*ptr))VAL;                                              \
})
#endif
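
/*
 * Illustrative note (not part of the original header): callers spin on a
 * condition over *ptr written in terms of the magic VAL variable, e.g.
 * with a hypothetical flag:
 *
 *	v = smp_cond_load_relaxed(&flag, VAL != 0);
 *
 * Assuming the Zawrs extension, each failed check ends in
 * __cmpwait_relaxed(), which roughly takes a load reservation on the
 * location and stalls the hart (wrs.nto) until a remote store breaks the
 * reservation, instead of busy-polling.
 */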

#include <asm-generic/barrier.h>

#endif /* __ASSEMBLY__ */

#endif /* _ASM_RISCV_BARRIER_H */
