
TOMOYO Linux Cross Reference
Linux/arch/openrisc/include/asm/atomic.h


/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#ifndef __ASM_OPENRISC_ATOMIC_H
#define __ASM_OPENRISC_ATOMIC_H

#include <linux/types.h>

/* Atomically perform op with v->counter and i */
#define ATOMIC_OP(op)                                                   \
static inline void arch_atomic_##op(int i, atomic_t *v)                 \
{                                                                       \
        int tmp;                                                        \
                                                                        \
        __asm__ __volatile__(                                           \
                "1:     l.lwa   %0,0(%1)        \n"                     \
                "       l." #op " %0,%0,%2      \n"                     \
                "       l.swa   0(%1),%0        \n"                     \
                "       l.bnf   1b              \n"                     \
                "        l.nop                  \n"                     \
                : "=&r"(tmp)                                            \
                : "r"(&v->counter), "r"(i)                              \
                : "cc", "memory");                                      \
}
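
/*
 * The sequence above is a load-linked/store-conditional retry loop:
 * l.lwa loads v->counter and sets a hardware reservation, l.swa
 * succeeds (setting the flag) only if that reservation is still
 * intact, and l.bnf branches back to retry when the store failed, with
 * l.nop filling the branch delay slot.  The "cc" and "memory" clobbers
 * tell the compiler that the flag and memory contents may change.  As
 * a sketch, ATOMIC_OP(add) thus produces arch_atomic_add(int i,
 * atomic_t *v), which atomically performs v->counter += i and returns
 * nothing.
 */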

/* Atomically perform op with v->counter and i, return the result */
#define ATOMIC_OP_RETURN(op)                                            \
static inline int arch_atomic_##op##_return(int i, atomic_t *v)         \
{                                                                       \
        int tmp;                                                        \
                                                                        \
        __asm__ __volatile__(                                           \
                "1:     l.lwa   %0,0(%1)        \n"                     \
                "       l." #op " %0,%0,%2      \n"                     \
                "       l.swa   0(%1),%0        \n"                     \
                "       l.bnf   1b              \n"                     \
                "        l.nop                  \n"                     \
                : "=&r"(tmp)                                            \
                : "r"(&v->counter), "r"(i)                              \
                : "cc", "memory");                                      \
                                                                        \
        return tmp;                                                     \
}
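
/*
 * Here tmp ends up holding the *new* value of v->counter, so
 * arch_atomic_##op##_return reports the result of the operation.  A
 * minimal, hypothetical use via the generic <linux/atomic.h> wrappers
 * built on these:
 *
 *      atomic_t n = ATOMIC_INIT(5);
 *      int x = atomic_add_return(3, &n);   // n.counter == 8, x == 8
 *      int y = atomic_sub_return(8, &n);   // n.counter == 0, y == 0
 */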

/* Atomically perform op with v->counter and i, return orig v->counter */
#define ATOMIC_FETCH_OP(op)                                             \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)            \
{                                                                       \
        int tmp, old;                                                   \
                                                                        \
        __asm__ __volatile__(                                           \
                "1:     l.lwa   %0,0(%2)        \n"                     \
                "       l." #op " %1,%0,%3      \n"                     \
                "       l.swa   0(%2),%1        \n"                     \
                "       l.bnf   1b              \n"                     \
                "        l.nop                  \n"                     \
                : "=&r"(old), "=&r"(tmp)                                \
                : "r"(&v->counter), "r"(i)                              \
                : "cc", "memory");                                      \
                                                                        \
        return old;                                                     \
}
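
/*
 * ATOMIC_FETCH_OP differs from ATOMIC_OP_RETURN only in which value it
 * hands back: the value v->counter held *before* the operation.  For
 * example, with v->counter == 1, arch_atomic_fetch_add(2, v) returns 1
 * and leaves v->counter == 3, where arch_atomic_add_return(2, v) would
 * return 3.
 */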

ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)

ATOMIC_FETCH_OP(add)
ATOMIC_FETCH_OP(sub)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor)

ATOMIC_OP(add)
ATOMIC_OP(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define arch_atomic_add_return  arch_atomic_add_return
#define arch_atomic_sub_return  arch_atomic_sub_return
#define arch_atomic_fetch_add   arch_atomic_fetch_add
#define arch_atomic_fetch_sub   arch_atomic_fetch_sub
#define arch_atomic_fetch_and   arch_atomic_fetch_and
#define arch_atomic_fetch_or    arch_atomic_fetch_or
#define arch_atomic_fetch_xor   arch_atomic_fetch_xor
#define arch_atomic_add         arch_atomic_add
#define arch_atomic_sub         arch_atomic_sub
#define arch_atomic_and         arch_atomic_and
#define arch_atomic_or          arch_atomic_or
#define arch_atomic_xor         arch_atomic_xor
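
/*
 * These self-referencing defines follow the kernel convention for
 * advertising which operations the architecture provides: the generic
 * atomic headers probe for these names with #ifdef and synthesize
 * fallbacks (including the _relaxed/_acquire/_release orderings) only
 * for whatever is left undefined.
 */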

/*
 * Atomically add a to v->counter as long as v->counter is not already u.
 * Returns the original value at v->counter.
 *
 * This is often used through atomic_inc_not_zero().
 */
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int old, tmp;

        __asm__ __volatile__(
                "1:     l.lwa %0, 0(%2)         \n"
                "       l.sfeq %0, %4           \n"
                "       l.bf 2f                 \n"
                "        l.add %1, %0, %3       \n"
                "       l.swa 0(%2), %1         \n"
                "       l.bnf 1b                \n"
                "        l.nop                  \n"
                "2:                             \n"
                : "=&r"(old), "=&r"(tmp)
                : "r"(&v->counter), "r"(a), "r"(u)
                : "cc", "memory");

        return old;
}
#define arch_atomic_fetch_add_unless    arch_atomic_fetch_add_unless
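
/*
 * Sketch of the usual caller pattern, via the generic wrappers built
 * on top of this: atomic_inc_not_zero(v) is atomic_add_unless(v, 1, 0),
 * which in turn is atomic_fetch_add_unless(v, 1, 0) != 0.  E.g., for a
 * hypothetical refcounted object:
 *
 *      if (!atomic_inc_not_zero(&obj->refcnt))
 *              return NULL;    // object already dead, do not revive it
 *
 * Note that l.add sits in the delay slot of l.bf, so tmp is computed
 * even when we take the early exit to label 2; it is simply discarded.
 */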

#define arch_atomic_read(v)             READ_ONCE((v)->counter)
#define arch_atomic_set(v,i)            WRITE_ONCE((v)->counter, (i))
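
/*
 * READ_ONCE()/WRITE_ONCE() stop the compiler from tearing, fusing or
 * re-reading the plain access to v->counter; a naturally aligned
 * 32-bit load or store is already a single access at the machine
 * level, so no special instruction is needed here.
 */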

#include <asm/cmpxchg.h>

#endif /* __ASM_OPENRISC_ATOMIC_H */
