TOMOYO Linux Cross Reference
Linux/arch/arm/include/asm/spinlock.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <linux/prefetch.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
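/*
 * Note: __ALT_SMP_ASM(smp, up) (see <asm/processor.h>) emits the SMP form of
 * the instruction and records it, so that a kernel built for SMP but booted
 * on a uniprocessor can patch it to the UP alternative at boot.
 */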
#ifdef CONFIG_THUMB2_KERNEL
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfe cond" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT instruction is inserted
 * explicitly: the assembler won't change IT instructions which are already
 * present in the input.
 */
#define WFE(cond)       __ALT_SMP_ASM(          \
        "it " cond "\n\t"                       \
        "wfe" cond ".n",                        \
                                                \
        "nop.w"                                 \
)
#else
#define WFE(cond)       __ALT_SMP_ASM("wfe" cond, "nop")
#endif

#define SEV             __ALT_SMP_ASM(WASM(sev), WASM(nop))

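/*
 * Wake-up helper for the unlock paths: make prior stores visible with a
 * store DSB, then execute SEV to wake any CPUs waiting in WFE on the lock.
 */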
static inline void dsb_sev(void)
{

        dsb(ishst);
        __asm__(SEV);
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */
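/*
 * The ticket lock lives in one 32-bit word (see arch_spinlock_t in
 * <asm/spinlock_types.h>): the "owner" ticket currently being served sits in
 * the low half and the "next" ticket to hand out in the high half, with
 * TICKET_SHIFT == 16.  A locker atomically takes a "next" ticket and then
 * spins until "owner" catches up with it.
 */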

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned long tmp;
        u32 newval;
        arch_spinlock_t lockval;

        prefetchw(&lock->slock);
        __asm__ __volatile__(
"1:     ldrex   %0, [%3]\n"
"       add     %1, %0, %4\n"
"       strex   %2, %1, [%3]\n"
"       teq     %2, #0\n"
"       bne     1b"
        : "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
        : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
        : "cc");

        while (lockval.tickets.next != lockval.tickets.owner) {
                wfe();
                lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
        }

        smp_mb();
}

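/*
 * Trylock makes a single attempt: "res" holds only the strex status, so the
 * loop below repeats solely when the exclusive store itself fails, while
 * "contended" records whether the ticket halves already differed (i.e. the
 * lock was held by someone else).
 */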
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned long contended, res;
        u32 slock;

        prefetchw(&lock->slock);
        do {
                __asm__ __volatile__(
                "       ldrex   %0, [%3]\n"
                "       mov     %2, #0\n"
                "       subs    %1, %0, %0, ror #16\n"
                "       addeq   %0, %0, %4\n"
                "       strexeq %2, %0, [%3]"
                : "=&r" (slock), "=&r" (contended), "=&r" (res)
                : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
                : "cc");
        } while (res);

        if (!contended) {
                smp_mb();
                return 1;
        } else {
                return 0;
        }
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        smp_mb();
        lock->tickets.owner++;
        dsb_sev();
}

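/* The lock is free once the "owner" ticket has caught up with "next". */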
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
        struct __raw_tickets tickets = READ_ONCE(lock->tickets);
        return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended  arch_spin_is_contended

/*
 * RWLOCKS
 *
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
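/*
 * Readers and the writer share one word: readers atomically increment the
 * count held in the low bits, while a writer owns the lock by installing
 * 0x80000000, so a negative value means the lock is write-locked.
 */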

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        prefetchw(&rw->lock);
        __asm__ __volatile__(
"1:     ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
        WFE("ne")
"       strexeq %0, %2, [%1]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp)
        : "r" (&rw->lock), "r" (0x80000000)
        : "cc");

        smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned long contended, res;

        prefetchw(&rw->lock);
        do {
                __asm__ __volatile__(
                "       ldrex   %0, [%2]\n"
                "       mov     %1, #0\n"
                "       teq     %0, #0\n"
                "       strexeq %1, %3, [%2]"
                : "=&r" (contended), "=&r" (res)
                : "r" (&rw->lock), "r" (0x80000000)
                : "cc");
        } while (res);

        if (!contended) {
                smp_mb();
                return 1;
        } else {
                return 0;
        }
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        smp_mb();

        __asm__ __volatile__(
        "str    %1, [%0]\n"
        :
        : "r" (&rw->lock), "r" (0)
        : "cc");

        dsb_sev();
}

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
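/*
 * In the fast path below, "adds" sets the flags for the incremented value:
 * strexpl stores only while the result is non-negative (no writer), WFE("mi")
 * sleeps while a writer holds the lock, and "rsbspl %0, %1, #0" negates the
 * strex status so that a failed store also leaves a negative value and the
 * sequence is retried.
 */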
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned long tmp, tmp2;

        prefetchw(&rw->lock);
        __asm__ __volatile__(
"       .syntax unified\n"
"1:     ldrex   %0, [%2]\n"
"       adds    %0, %0, #1\n"
"       strexpl %1, %0, [%2]\n"
        WFE("mi")
"       rsbspl  %0, %1, #0\n"
"       bmi     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&rw->lock)
        : "cc");

        smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long tmp, tmp2;

        smp_mb();

        prefetchw(&rw->lock);
        __asm__ __volatile__(
"1:     ldrex   %0, [%2]\n"
"       sub     %0, %0, #1\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&rw->lock)
        : "cc");

        if (tmp == 0)
                dsb_sev();
}

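/*
 * One increment attempt: "contended" is the incremented lock value, so a
 * result with bit 31 set means a writer already held the lock (and strexpl
 * skipped the store); "res" is the strex status, so the loop only repeats
 * on a failed exclusive store.
 */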
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned long contended, res;

        prefetchw(&rw->lock);
        do {
                __asm__ __volatile__(
                "       ldrex   %0, [%2]\n"
                "       mov     %1, #0\n"
                "       adds    %0, %0, #1\n"
                "       strexpl %1, %0, [%2]"
                : "=&r" (contended), "=&r" (res)
                : "r" (&rw->lock)
                : "cc");
        } while (res);

        /* If the lock is negative, then it is already held for write. */
        if (contended < 0x80000000) {
                smp_mb();
                return 1;
        } else {
                return 0;
        }
}

#endif /* __ASM_SPINLOCK_H */