
TOMOYO Linux Cross Reference
Linux/arch/sh/include/asm/spinlock-llsc.h

Version: linux-6.12-rc7

Diff markup

Differences between /arch/sh/include/asm/spinlock-llsc.h and /arch/i386/include/asm-i386/spinlock-llsc.h (the latter file does not exist, so the diff reduces to the full sh version of the file, shown below)


/* SPDX-License-Identifier: GPL-2.0
 *
 * include/asm-sh/spinlock-llsc.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 */
#ifndef __ASM_SH_SPINLOCK_LLSC_H
#define __ASM_SH_SPINLOCK_LLSC_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define arch_spin_is_locked(x)          ((x)->lock <= 0)
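
/*
 * A positive ->lock value means the lock is free; arch_spin_lock() stores
 * 0 while the lock is held, so "<= 0" reads as locked.
 */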

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
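/*
 * movli.l/movco.l are the SH-4A load-locked/store-conditional pair:
 * movco.l succeeds (setting the T bit) only if the reservation taken by
 * movli.l is still intact, and "bf 1b" loops back whenever T is clear.
 * The "z" constraint pins %0 to r0, which movco.l requires, and the "t"
 * clobber tells the compiler the T bit is trashed.  After a successful
 * store of 0, cmp/pl re-checks that the value we displaced was positive,
 * i.e. that the lock was actually free; otherwise we spin again.
 */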
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned long tmp;
        unsigned long oldval;

        __asm__ __volatile__ (
                "1:                                             \n\t"
                "movli.l        @%2, %0 ! arch_spin_lock        \n\t"
                "mov            %0, %1                          \n\t"
                "mov            #0, %0                          \n\t"
                "movco.l        %0, @%2                         \n\t"
                "bf             1b                              \n\t"
                "cmp/pl         %1                              \n\t"
                "bf             1b                              \n\t"
                : "=&z" (tmp), "=&r" (oldval)
                : "r" (&lock->lock)
                : "t", "memory"
        );
}

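/*
 * Unlock is a plain store of the "free" value 1.  The mmiowb() keeps any
 * MMIO writes issued inside the critical section from being reordered
 * past the release store as seen by the device.
 */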
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        unsigned long tmp;

        /* This could be optimised with ARCH_HAS_MMIOWB */
        mmiowb();
        __asm__ __volatile__ (
                "mov            #1, %0 ! arch_spin_unlock       \n\t"
                "mov.l          %0, @%1                         \n\t"
                : "=&z" (tmp)
                : "r" (&lock->lock)
                : "t", "memory"
        );
}

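/*
 * Same exchange-with-0 sequence as arch_spin_lock(), but instead of
 * spinning when the displaced value was not positive it just returns it:
 * non-zero means the lock was free and is now ours.  synco is the SH-4A
 * barrier instruction, ordering the acquire on success.
 */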
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned long tmp, oldval;

        __asm__ __volatile__ (
                "1:                                             \n\t"
                "movli.l        @%2, %0 ! arch_spin_trylock     \n\t"
                "mov            %0, %1                          \n\t"
                "mov            #0, %0                          \n\t"
                "movco.l        %0, @%2                         \n\t"
                "bf             1b                              \n\t"
                "synco                                          \n\t"
                : "=&z" (tmp), "=&r" (oldval)
                : "r" (&lock->lock)
                : "t", "memory"
        );

        return oldval;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get a irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

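/*
 * The counter starts at RW_LOCK_BIAS.  Each reader subtracts 1, so any
 * positive value means readers may still enter; a writer subtracts the
 * whole bias, dropping the counter to 0 and shutting out readers and
 * writers alike.  The unlock paths add the same amount back.
 */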
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__ (
                "1:                                             \n\t"
                "movli.l        @%1, %0 ! arch_read_lock        \n\t"
                "cmp/pl         %0                              \n\t"
                "bf             1b                              \n\t"
                "add            #-1, %0                         \n\t"
                "movco.l        %0, @%1                         \n\t"
                "bf             1b                              \n\t"
                : "=&z" (tmp)
                : "r" (&rw->lock)
                : "t", "memory"
        );
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__ (
                "1:                                             \n\t"
                "movli.l        @%1, %0 ! arch_read_unlock      \n\t"
                "add            #1, %0                          \n\t"
                "movco.l        %0, @%1                         \n\t"
                "bf             1b                              \n\t"
                : "=&z" (tmp)
                : "r" (&rw->lock)
                : "t", "memory"
        );
}

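/*
 * cmp/hs is an unsigned >= compare: the writer spins until the counter
 * has returned to the full RW_LOCK_BIAS (no readers, no writer) and then
 * subtracts the entire bias in one LL/SC step.
 */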
static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__ (
                "1:                                             \n\t"
                "movli.l        @%1, %0 ! arch_write_lock       \n\t"
                "cmp/hs         %2, %0                          \n\t"
                "bf             1b                              \n\t"
                "sub            %2, %0                          \n\t"
                "movco.l        %0, @%1                         \n\t"
                "bf             1b                              \n\t"
                : "=&z" (tmp)
                : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
                : "t", "memory"
        );
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        __asm__ __volatile__ (
                "mov.l          %1, @%0 ! arch_write_unlock     \n\t"
                :
                : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
                : "t", "memory"
        );
}

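/*
 * The trylock variants branch forward to "2:" instead of spinning on
 * contention, leaving the counter value unchanged; the caller learns the
 * outcome from oldval, the value sampled before the update was attempted.
 */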
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned long tmp, oldval;

        __asm__ __volatile__ (
                "1:                                             \n\t"
                "movli.l        @%2, %0 ! arch_read_trylock     \n\t"
                "mov            %0, %1                          \n\t"
                "cmp/pl         %0                              \n\t"
                "bf             2f                              \n\t"
                "add            #-1, %0                         \n\t"
                "movco.l        %0, @%2                         \n\t"
                "bf             1b                              \n\t"
                "2:                                             \n\t"
                "synco                                          \n\t"
                : "=&z" (tmp), "=&r" (oldval)
                : "r" (&rw->lock)
                : "t", "memory"
        );

        return (oldval > 0);
}

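/*
 * "oldval > (RW_LOCK_BIAS - 1)" below is simply "oldval >= RW_LOCK_BIAS":
 * the write lock was taken only if the counter held the full bias value.
 */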
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned long tmp, oldval;

        __asm__ __volatile__ (
                "1:                                             \n\t"
                "movli.l        @%2, %0 ! arch_write_trylock    \n\t"
                "mov            %0, %1                          \n\t"
                "cmp/hs         %3, %0                          \n\t"
                "bf             2f                              \n\t"
                "sub            %3, %0                          \n\t"
                "2:                                             \n\t"
                "movco.l        %0, @%2                         \n\t"
                "bf             1b                              \n\t"
                "synco                                          \n\t"
                : "=&z" (tmp), "=&r" (oldval)
                : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
                : "t", "memory"
        );

        return (oldval > (RW_LOCK_BIAS - 1));
}

#endif /* __ASM_SH_SPINLOCK_LLSC_H */
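
For readers without the SH instruction set at hand, the following is a
minimal C11 <stdatomic.h> sketch of what arch_spin_lock(),
arch_spin_trylock() and arch_spin_unlock() above accomplish.  It is an
illustration, not kernel code: atomic_exchange stands in for the
movli.l/movco.l retry loop, and the sh_* names are invented for this
sketch.

#include <stdatomic.h>

typedef struct {
        atomic_int lock;        /* 1 = free, 0 = held, as in the asm above */
} sh_spinlock_t;

static inline void sh_spin_lock(sh_spinlock_t *l)
{
        /* Atomically swap in 0; a positive old value means we own it. */
        while (atomic_exchange_explicit(&l->lock, 0,
                                        memory_order_acquire) <= 0)
                ;       /* spin until the holder stores 1 back */
}

static inline int sh_spin_trylock(sh_spinlock_t *l)
{
        /* Single attempt: non-zero old value == lock acquired. */
        return atomic_exchange_explicit(&l->lock, 0,
                                        memory_order_acquire) > 0;
}

static inline void sh_spin_unlock(sh_spinlock_t *l)
{
        /* Release by storing the "free" value back. */
        atomic_store_explicit(&l->lock, 1, memory_order_release);
}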
