~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/hexagon/include/asm/spinlock.h

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /* SPDX-License-Identifier: GPL-2.0-only */
  2 /*
  3  * Spinlock support for the Hexagon architecture
  4  *
  5  * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
  6  */
  7 
  8 #ifndef _ASM_SPINLOCK_H
  9 #define _ASM_SPINLOCK_H
 10 
 11 #include <asm/irqflags.h>
 12 #include <asm/barrier.h>
 13 #include <asm/processor.h>
 14 
 15 /*
 16  * This file is pulled in for SMP builds.
 17  * Really need to check all the barrier stuff for "true" SMP
 18  */
 19 
 20 /*
 21  * Read locks:
 22  * - load the lock value
 23  * - increment it
 24  * - if the lock value is still negative, go back and try again.
 25  * - if the conditional store fails (another CPU touched the word), go back and try again.
 26  * - successful store new lock value if positive -> lock acquired
 27  */
/*
 * arch_read_lock - spin until the lock is acquired for reading.
 *
 * The lock word holds a reader count; arch_write_lock stores -1, so a
 * negative value means a writer is present.  Load-locked /
 * store-conditional loop (memw_locked appears to be Hexagon's LL/SC
 * primitive -- the store sets predicate P3 to its success status):
 *   - R6 = lock word; same packet: P3 = (R6 >= 0), R6 = R6 + 1
 *   - if P3 is false (writer present), retry from the load
 *   - conditionally store the incremented count
 *   - if the store failed (P3 false), retry from the load
 * R6 and P3 are scratch, hence the clobber list; "memory" keeps the
 * compiler from moving accesses across the acquire.
 */
 28 static inline void arch_read_lock(arch_rwlock_t *lock)
 29 {
 30         __asm__ __volatile__(
 31                 "1:     R6 = memw_locked(%0);\n"
 32                 "       { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
 33                 "       { if (!P3) jump 1b; }\n"
 34                 "       memw_locked(%0,P3) = R6;\n"
 35                 "       { if (!P3) jump 1b; }\n"
 36                 :
 37                 : "r" (&lock->lock)
 38                 : "memory", "r6", "p3"
 39         );
 40 
 41 }
 42 
/*
 * arch_read_unlock - drop one read hold: atomically decrement the
 * reader count.  Unconditional decrement in an LL/SC loop; if the
 * conditional store fails (P3 false), retry from the load.  No
 * predicate guards the decrement itself -- the caller must actually
 * hold the read lock.
 */
 43 static inline void arch_read_unlock(arch_rwlock_t *lock)
 44 {
 45         __asm__ __volatile__(
 46                 "1:     R6 = memw_locked(%0);\n"
 47                 "       R6 = add(R6,#-1);\n"
 48                 "       memw_locked(%0,P3) = R6\n"
 49                 "       if (!P3) jump 1b;\n"
 50                 :
 51                 : "r" (&lock->lock)
 52                 : "memory", "r6", "p3"
 53         );
 54 
 55 }
 56 
 57 /*
 *  arch_read_trylock - single attempt to take the read lock; no spinning.
 *
 *  Returns 0 on failure, nonzero on success (presumably 1 -- %0 gets
 *  the P3 predicate transferred to a register).  %0 is pre-set to 0 in
 *  the same packet as the compare; it is overwritten with P3 only
 *  after the conditional store, so both a failed compare (writer
 *  present, R6 < 0 -> jump to 1f) and a failed store leave 0.
 */
 58 static inline int arch_read_trylock(arch_rwlock_t *lock)
 59 {
 60         int temp;
 61         __asm__ __volatile__(
 62                 "       R6 = memw_locked(%1);\n"
 63                 "       { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
 64                 "       { if (!P3) jump 1f; }\n"
 65                 "       memw_locked(%1,P3) = R6;\n"
 66                 "       { %0 = P3 }\n"
 67                 "1:\n"
 68                 : "=&r" (temp)
 69                 : "r" (&lock->lock)
 70                 : "memory", "r6", "p3"
 71         );
 72         return temp;
 73 }
 74 
 75 /*
 *  arch_write_lock - spin until exclusive (write) ownership is gained.
 *
 *  Waits for the lock word to be exactly 0 (no readers, no writer),
 *  then conditionally stores -1 to mark a writer present.  Retries
 *  from the load on contention (word nonzero) or on a failed
 *  conditional store.  The -1 sentinel is what arch_read_lock's
 *  cmp.ge(R6,#0) test rejects.
 */
 76 static inline void arch_write_lock(arch_rwlock_t *lock)
 77 {
 78         __asm__ __volatile__(
 79                 "1:     R6 = memw_locked(%0)\n"
 80                 "       { P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
 81                 "       { if (!P3) jump 1b; }\n"
 82                 "       memw_locked(%0,P3) = R6;\n"
 83                 "       { if (!P3) jump 1b; }\n"
 84                 :
 85                 : "r" (&lock->lock)
 86                 : "memory", "r6", "p3"
 87         );
 88 }
 89 
 90 
/*
 * arch_write_trylock - single attempt to take the write lock; no
 * spinning.  Returns 0 on failure, nonzero on success, using the same
 * %0-pre-zeroed / %0-gets-P3-after-store pattern as arch_read_trylock:
 * failure either via the lock word being nonzero (jump to 1f with %0
 * still 0) or via a failed conditional store (%0 = P3 = 0).
 */
 91 static inline int arch_write_trylock(arch_rwlock_t *lock)
 92 {
 93         int temp;
 94         __asm__ __volatile__(
 95                 "       R6 = memw_locked(%1)\n"
 96                 "       { %0 = #0; P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
 97                 "       { if (!P3) jump 1f; }\n"
 98                 "       memw_locked(%1,P3) = R6;\n"
 99                 "       %0 = P3;\n"
100                 "1:\n"
101                 : "=&r" (temp)
102                 : "r" (&lock->lock)
103                 : "memory", "r6", "p3"
104         );
105         return temp;
106 
107 }
108 
/*
 * arch_write_unlock - release write ownership.
 * smp_mb() orders the critical section's accesses before the plain
 * store of 0 that reopens the lock to readers and writers; the store
 * itself needs no LL/SC since only the owner writes here.
 */
109 static inline void arch_write_unlock(arch_rwlock_t *lock)
110 {
111         smp_mb();
112         lock->lock = 0;
113 }
114 
/*
 * arch_spin_lock - spin until the spinlock is acquired.
 * LL/SC loop: wait for the lock word to read 0 (free), then
 * conditionally store 1 to claim it; retry from the load on either a
 * held lock or a failed conditional store.  R6 = #1 sits in the same
 * packet as the retry jump, so R6 is primed for the store when the
 * lock is free.
 */
115 static inline void arch_spin_lock(arch_spinlock_t *lock)
116 {
117         __asm__ __volatile__(
118                 "1:     R6 = memw_locked(%0);\n"
119                 "       P3 = cmp.eq(R6,#0);\n"
120                 "       { if (!P3) jump 1b; R6 = #1; }\n"
121                 "       memw_locked(%0,P3) = R6;\n"
122                 "       { if (!P3) jump 1b; }\n"
123                 :
124                 : "r" (&lock->lock)
125                 : "memory", "r6", "p3"
126         );
127 
128 }
129 
/*
 * arch_spin_unlock - release the spinlock.
 * smp_mb() keeps the critical section's accesses ordered before the
 * plain store of 0 that frees the lock (matches arch_write_unlock).
 */
130 static inline void arch_spin_unlock(arch_spinlock_t *lock)
131 {
132         smp_mb();
133         lock->lock = 0;
134 }
135 
/*
 * arch_spin_trylock - single attempt to take the spinlock; no spinning.
 * Returns 0 on failure, nonzero on success: %0 is zeroed in the packet
 * that skips ahead when the lock is held, and gets P3 (the conditional
 * store's success predicate) only after the store is attempted.
 * NOTE(review): the return type is unsigned int while temp is int --
 * the implicit conversion is benign for the 0 / P3-transfer values
 * produced here, but the types differ from the rwlock trylocks.
 */
136 static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
137 {
138         int temp;
139         __asm__ __volatile__(
140                 "       R6 = memw_locked(%1);\n"
141                 "       P3 = cmp.eq(R6,#0);\n"
142                 "       { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
143                 "       memw_locked(%1,P3) = R6;\n"
144                 "       %0 = P3;\n"
145                 "1:\n"
146                 : "=&r" (temp)
147                 : "r" (&lock->lock)
148                 : "memory", "r6", "p3"
149         );
150         return temp;
151 }
152 
153 /*
154  * SMP spinlocks are intended to allow only a single CPU at the lock
155  */
/* Nonzero lock word means held: arch_spin_lock stores 1, unlock stores 0. */
156 #define arch_spin_is_locked(x) ((x)->lock != 0)
157 
158 #endif
159 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php