
TOMOYO Linux Cross Reference
Linux/arch/parisc/include/asm/spinlock.h




/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

static inline void arch_spin_val_check(int lock_val)
{
        if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
                asm volatile(   "andcm,= %0,%1,%%r0\n"
                                ".word %2\n"
                : : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL),
                        "i" (SPINLOCK_BREAK_INSN));
}

static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
        volatile unsigned int *a;
        int lock_val;

        a = __ldcw_align(x);
        lock_val = READ_ONCE(*a);
        arch_spin_val_check(lock_val);
        return (lock_val == 0);
}

static inline void arch_spin_lock(arch_spinlock_t *x)
{
        volatile unsigned int *a;

        a = __ldcw_align(x);
        do {
                int lock_val_old;

                lock_val_old = __ldcw(a);
                arch_spin_val_check(lock_val_old);
                if (lock_val_old)
                        return; /* got lock */

                /* wait until we should try to get lock again */
                while (*a == 0)
                        continue;
        } while (1);
}

static inline void arch_spin_unlock(arch_spinlock_t *x)
{
        volatile unsigned int *a;

        a = __ldcw_align(x);
        /* Release with ordered store. */
        __asm__ __volatile__("stw,ma %0,0(%1)"
                : : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *x)
{
        volatile unsigned int *a;
        int lock_val;

        a = __ldcw_align(x);
        lock_val = __ldcw(a);
        arch_spin_val_check(lock_val);
        return lock_val != 0;
}
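
Note: ldcw ("load word and clear") is the only atomic memory primitive on
PA-RISC, which is why the polarity of the lock word above is inverted
relative to most ports: a nonzero word means the lock is free, zero means
it is held, and taking the lock is simply the load-and-clear succeeding
with a nonzero result. Below is a minimal user-space sketch of the same
semantics in C11 atomics; all model_* names are hypothetical illustration,
not kernel API.

#include <stdatomic.h>

typedef struct { _Atomic unsigned int val; } model_lock_t;

#define MODEL_LOCK_INIT { 1u }          /* nonzero == unlocked */

/* Sketch of __ldcw(): an atomic load-and-clear. */
static unsigned int model_ldcw(model_lock_t *l)
{
        return atomic_exchange_explicit(&l->val, 0u, memory_order_acquire);
}

static void model_lock(model_lock_t *l)
{
        while (model_ldcw(l) == 0)      /* 0: someone else holds it */
                while (atomic_load_explicit(&l->val,
                                            memory_order_relaxed) == 0)
                        ;               /* spin read-only, then retry */
}

static void model_unlock(model_lock_t *l)
{
        /* Release store of a nonzero value, like the stw,ma above. */
        atomic_store_explicit(&l->val, 1u, memory_order_release);
}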
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subseq reader
         */
        if (rw->counter > 0) {
                rw->counter--;
                ret = 1;
        }

        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny writer. Otherwise if unlocked grant to writer
         * Hence the claim that Linux rwlocks are unfair to writers
         * (can be starved for an indefinite time by readers).
         */
        if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
                rw->counter = 0;
                ret = 1;
        }
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}
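
Note: the @counter encoding used above is: counter == __ARCH_RW_LOCK_UNLOCKED__
means the lock is free, counter == 0 means a writer holds it, and each
reader decrements it, so the number of active readers is
(__ARCH_RW_LOCK_UNLOCKED__ - counter). A single-threaded user-space sketch
of the two trylock paths, assuming an unlocked value of 0x01000000 (the
value in the parisc spinlock_types.h of this era; verify against your
tree) and hypothetical model_* names:

#include <assert.h>

#define MODEL_RW_UNLOCKED 0x01000000    /* assumed __ARCH_RW_LOCK_UNLOCKED__ */

static int model_counter = MODEL_RW_UNLOCKED;

/* In the real code these updates run under @lock_mutex with IRQs off. */
static int model_read_trylock(void)
{
        if (model_counter > 0) {        /* no writer (a writer sets it to 0) */
                model_counter--;        /* one more reader */
                return 1;
        }
        return 0;
}

static int model_write_trylock(void)
{
        if (model_counter == MODEL_RW_UNLOCKED) {       /* completely free */
                model_counter = 0;                      /* writer-owned */
                return 1;
        }
        return 0;                       /* readers or a writer hold it */
}

int main(void)
{
        assert(model_read_trylock());   /* first reader gets in */
        assert(model_read_trylock());   /* readers may share */
        assert(!model_write_trylock()); /* writer denied while readers hold */
        model_counter += 2;             /* both readers drop the lock */
        assert(model_write_trylock());  /* now the writer succeeds */
        return 0;
}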
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (!arch_read_trylock(rw))
                cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (!arch_write_trylock(rw))
                cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter++;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

#endif /* __ASM_SPINLOCK_H */
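
Note: kernel code never calls these arch_* hooks directly; they sit
underneath the generic <linux/spinlock.h> interface. A sketch of an
ordinary caller, with hypothetical names (my_lock, my_rwlock,
shared_count), might look like:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static DEFINE_RWLOCK(my_rwlock);
static int shared_count;

static void example_update(void)
{
        unsigned long flags;

        spin_lock_irqsave(&my_lock, flags);     /* ends up in arch_spin_lock() */
        shared_count++;
        spin_unlock_irqrestore(&my_lock, flags);
}

static int example_read(void)
{
        int v;

        read_lock(&my_rwlock);                  /* ends up in arch_read_lock() */
        v = shared_count;
        read_unlock(&my_rwlock);
        return v;
}

static void example_reset(void)
{
        write_lock(&my_rwlock);                 /* ends up in arch_write_lock() */
        shared_count = 0;
        write_unlock(&my_rwlock);
}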
