
TOMOYO Linux Cross Reference
Linux/arch/s390/include/asm/spinlock.h

Version: linux-6.11-rc3

Diff markup

Differences between /arch/s390/include/asm/spinlock.h (Architecture sparc) and /arch/sparc64/include/asm-sparc64/spinlock.h (Architecture sparc64). The sparc64 side of the comparison is empty, so only the s390 source appears below.


/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/alternative.h>

#define SPINLOCK_LOCKVAL (get_lowcore()->spinlock_lockval)

extern int spin_retry;

bool arch_vcpu_is_preempted(int cpu);

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
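/*
 * For context, a sketch of the type definitions from asm/spinlock_types.h
 * as of this kernel series (reproduced here for reference, not part of
 * this file): the lock word is a single aligned int, and the rwlock adds
 * a wait-queue spinlock next to its counter.
 *
 *      typedef struct {
 *              int lock;
 *      } __attribute__ ((aligned (4))) arch_spinlock_t;
 *
 *      typedef struct {
 *              int cnts;
 *              arch_spinlock_t wait;
 *      } arch_rwlock_t;
 */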

void arch_spin_relax(arch_spinlock_t *lock);
#define arch_spin_relax arch_spin_relax

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_setup(int cpu);

static inline u32 arch_spin_lockval(int cpu)
{
        return cpu + 1;
}
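/*
 * Illustrative sketch, not part of the original file: arch_spin_lockval()
 * stores cpu + 1, so a lock value of zero always means "unlocked" and a
 * nonzero value identifies the owning CPU.  A hypothetical decode helper
 * could invert the mapping like this:
 */
static inline int example_spin_owner_cpu(u32 lockval)
{
        /* assumption: reverse of arch_spin_lockval(); -1 if unlocked */
        return lockval ? (int)lockval - 1 : -1;
}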

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
        return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
        barrier();
        return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait(lp);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                return arch_spin_trylock_retry(lp);
        return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
        typecheck(int, lp->lock);
        kcsan_release();
        asm_inline volatile(
                ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", ALT_FACILITY(49)) /* NIAI 7 */
                "       sth     %1,%0\n"
                : "=R" (((unsigned short *) &lp->lock)[1])
                : "d" (0) : "cc", "memory");
}
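/*
 * Usage sketch, not part of the original file: lock and trylock both try
 * the inline compare-and-swap fast path once and fall back to the
 * out-of-line wait/retry helpers only on contention; unlock is a plain
 * store-halfword of zero into the low halfword of the lock word,
 * preceded by a NIAI access-intent hint where facility 49 is installed.
 * A hypothetical caller:
 */
static inline void example_critical_section(arch_spinlock_t *lp)
{
        arch_spin_lock(lp);     /* one cmpxchg, else arch_spin_lock_wait() */
        /* ... critical section ... */
        arch_spin_unlock(lp);   /* sth of 0 releases the lock */
}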

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non irqsafe
 * read-locks.
 */
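/*
 * Layout sketch, inferred from the operations below rather than stated in
 * the original file: rw->cnts appears to keep the reader count in its low
 * 16 bits, while a writer claims the lock by installing 0x30000 in the
 * high bits.  Hypothetical decode helpers:
 */
static inline int example_rw_readers(int cnts)
{
        return cnts & 0xffff;                   /* active readers */
}

static inline int example_rw_writer_bits(int cnts)
{
        return (cnts & 0xffff0000) != 0;        /* writer held or pending */
}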

#define arch_read_relax(rw) barrier()
#define arch_write_relax(rw) barrier()

void arch_read_lock_wait(arch_rwlock_t *lp);
void arch_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        int old;

        old = __atomic_add(1, &rw->cnts);
        if (old & 0xffff0000)
                arch_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        __atomic_add_const_barrier(-1, &rw->cnts);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
                arch_write_lock_wait(rw);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        __atomic_add_barrier(-0x30000, &rw->cnts);
}


static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int old;

        old = READ_ONCE(rw->cnts);
        return (!(old & 0xffff0000) &&
                __atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int old;

        old = READ_ONCE(rw->cnts);
        return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
}
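/*
 * Usage sketch, not part of the original file: the trylock variants
 * return nonzero on success and never spin, so a caller must be prepared
 * to back off:
 */
static inline int example_try_read_side(arch_rwlock_t *rw)
{
        if (!arch_read_trylock(rw))
                return 0;               /* writer held or pending */
        /* ... read-side critical section ... */
        arch_read_unlock(rw);
        return 1;
}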

#endif /* __ASM_SPINLOCK_H */

