~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/hexagon/include/asm/atomic.h

Version: ~ [ linux-6.11-rc3 ] ~ [ linux-6.10.4 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.45 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.104 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.164 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.223 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.281 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.319 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /* SPDX-License-Identifier: GPL-2.0-only */
  2 /*
  3  * Atomic operations for the Hexagon architecture
  4  *
  5  * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  6  */
  7 
  8 #ifndef _ASM_ATOMIC_H
  9 #define _ASM_ATOMIC_H
 10 
 11 #include <linux/types.h>
 12 #include <asm/cmpxchg.h>
 13 #include <asm/barrier.h>
 14 
 15 /*  Normal writes in our arch don't clear lock reservations  */
 16 
/*
 * arch_atomic_set - atomically store @new into @v->counter.
 *
 * Per the note above, a normal store on this arch does not clear
 * another core's load-locked reservation on the word.  So instead of
 * a plain write, this performs a dummy load-locked (result discarded
 * into r6) to take the reservation itself, then a store-conditional;
 * the predicate (p0) reports success and the sequence retries until
 * the conditional store lands.
 */
static inline void arch_atomic_set(atomic_t *v, int new)
{
        asm volatile(
                "1:     r6 = memw_locked(%0);\n"	/* acquire reservation; loaded value unused */
                "       memw_locked(%0,p0) = %1;\n"	/* conditional store of new; p0 = success */
                "       if (!P0) jump 1b;\n"		/* reservation lost -- retry */
                :
                : "r" (&v->counter), "r" (new)
                : "memory", "p0", "r6"	/* r6 is hard-coded in the asm above, hence clobbered */
        );
}
 28 
/*
 * Release-ordered set maps straight onto the plain set.
 * NOTE(review): no extra barrier is emitted here -- presumably the
 * LL/SC sequence in arch_atomic_set() provides sufficient ordering
 * on this arch; confirm against the Hexagon memory model.
 */
#define arch_atomic_set_release(v, i)   arch_atomic_set((v), (i))

/* Aligned 32-bit read; READ_ONCE prevents compiler tearing/caching. */
#define arch_atomic_read(v)             READ_ONCE((v)->counter)
 32 
/*
 * ATOMIC_OP - generate arch_atomic_<op>(i, v), no return value.
 *
 * Classic LL/SC loop: load-locked the counter, apply <op> with
 * operand @i, then store-conditionally; the predicate P3 reports
 * whether the store succeeded, and the loop retries on failure.
 * The "#op" stringification requires <op> to name a Hexagon ALU
 * instruction (add, sub, and, or, xor below).
 */
#define ATOMIC_OP(op)                                                   \
static inline void arch_atomic_##op(int i, atomic_t *v)                 \
{                                                                       \
        int output;                                                     \
                                                                        \
        __asm__ __volatile__ (                                          \
                "1:     %0 = memw_locked(%1);\n"                        \
                "       %0 = "#op "(%0,%2);\n"                          \
                "       memw_locked(%1,P3)=%0;\n"                       \
                "       if (!P3) jump 1b;\n"                            \
                : "=&r" (output)                                        \
                : "r" (&v->counter), "r" (i)                            \
                : "memory", "p3"                                        \
        );                                                              \
}                                                                       \
 48 
/*
 * ATOMIC_OP_RETURN - generate arch_atomic_<op>_return(i, v).
 *
 * Same LL/SC loop as ATOMIC_OP, but the computed (new) value is
 * kept in "output" and returned to the caller after the
 * store-conditional succeeds.
 */
#define ATOMIC_OP_RETURN(op)                                            \
static inline int arch_atomic_##op##_return(int i, atomic_t *v)         \
{                                                                       \
        int output;                                                     \
                                                                        \
        __asm__ __volatile__ (                                          \
                "1:     %0 = memw_locked(%1);\n"                        \
                "       %0 = "#op "(%0,%2);\n"                          \
                "       memw_locked(%1,P3)=%0;\n"                       \
                "       if (!P3) jump 1b;\n"                            \
                : "=&r" (output)                                        \
                : "r" (&v->counter), "r" (i)                            \
                : "memory", "p3"                                        \
        );                                                              \
        return output;                                                  \
}
 65 
/*
 * ATOMIC_FETCH_OP - generate arch_atomic_fetch_<op>(i, v).
 *
 * LL/SC loop using two scratch registers: "output" holds the value
 * loaded from the counter (the OLD value, which is what is
 * returned), while "val" holds the result of <op> that is
 * store-conditionally written back.  Retries until the store lands.
 */
#define ATOMIC_FETCH_OP(op)                                             \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)            \
{                                                                       \
        int output, val;                                                \
                                                                        \
        __asm__ __volatile__ (                                          \
                "1:     %0 = memw_locked(%2);\n"                        \
                "       %1 = "#op "(%0,%3);\n"                          \
                "       memw_locked(%2,P3)=%1;\n"                       \
                "       if (!P3) jump 1b;\n"                            \
                : "=&r" (output), "=&r" (val)                           \
                : "r" (&v->counter), "r" (i)                            \
                : "memory", "p3"                                        \
        );                                                              \
        return output;                                                  \
}
 82 
/* Generate all three variants (void, *_return and fetch_*) for an op. */
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

/*
 * Self-referential #defines: the generic atomic layer tests these
 * names with #ifdef to know the arch provides its own versions and
 * must not supply fallbacks.
 */
#define arch_atomic_add_return                  arch_atomic_add_return
#define arch_atomic_sub_return                  arch_atomic_sub_return
#define arch_atomic_fetch_add                   arch_atomic_fetch_add
#define arch_atomic_fetch_sub                   arch_atomic_fetch_sub

/* Bitwise ops get no *_return variant -- only void and fetch_* forms. */
#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#define arch_atomic_fetch_and                   arch_atomic_fetch_and
#define arch_atomic_fetch_or                    arch_atomic_fetch_or
#define arch_atomic_fetch_xor                   arch_atomic_fetch_xor

/* Generator macros are local to this header -- clean up. */
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
108 
/*
 * arch_atomic_fetch_add_unless - add @a to @v->counter unless it equals @u.
 *
 * Returns the old value of the counter in either case: if the loaded
 * value equals @u the sequence branches to label 2 without storing;
 * otherwise tmp = old + a is written back with a store-conditional
 * and the whole sequence retries on reservation loss.
 *
 * The braces group instructions into packets so that the compare,
 * the p3.new-predicated branch and the add issue together
 * (p3.new consumes the predicate produced in the same packet).
 * NOTE(review): on the early-exit path p3 holds the compare result,
 * not a store status -- harmless, since the function returns
 * __oldval, not p3.
 */
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int __oldval;
        register int tmp;

        asm volatile(
                "1:     %0 = memw_locked(%2);"		/* __oldval = load-locked counter */
                "       {"
                "               p3 = cmp.eq(%0, %4);"	/* old == u ? */
                "               if (p3.new) jump:nt 2f;"	/* yes: skip the add/store */
                "               %1 = add(%0, %3);"	/* tmp = old + a */
                "       }"
                "       memw_locked(%2, p3) = %1;"	/* conditional store; p3 = success */
                "       {"
                "               if (!p3) jump 1b;"	/* reservation lost -- retry */
                "       }"
                "2:"
                : "=&r" (__oldval), "=&r" (tmp)
                : "r" (v), "r" (a), "r" (u)
                : "memory", "p3"
        );
        return __oldval;
}
/* Advertise to the generic layer that the arch provides this op. */
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
133 
134 #endif
135 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php