
TOMOYO Linux Cross Reference
Linux/arch/parisc/include/asm/atomic.h

Diff markup

Differences between /arch/parisc/include/asm/atomic.h (Architecture mips) and /arch/sparc64/include/asm-sparc64/atomic.h (Architecture sparc64). The sparc64 file has no content at this path, so the right-hand diff column is empty and the listing below is simply the full parisc file.


/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
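
/* Example: with ATOMIC_HASH_SIZE == 4, ATOMIC_HASH(a) indexes the lock
 * array with (a / L1_CACHE_BYTES) & 3, so all atomics within one
 * cacheline share a single lock while neighbouring cachelines tend to
 * spread across the four locks. */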

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
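/* UP: no other CPU can race with us, so disabling local interrupts
 * alone makes the read-modify-write sequences below atomic. */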
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */
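
/* Reads still go through READ_ONCE() in arch_atomic_read() below so the
 * compiler cannot tear, fuse, or cache the load. */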

static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

static __inline__ int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

#define ATOMIC_OP(op, c_op)						\
static __inline__ void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
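
/* For reference, ATOMIC_OPS(add, +=) generates arch_atomic_add(),
 * arch_atomic_add_return() and arch_atomic_fetch_add(); e.g. the
 * ATOMIC_OP_RETURN instance expands to:
 *
 *	static __inline__ int arch_atomic_add_return(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *		int ret;
 *
 *		_atomic_spin_lock_irqsave(v, flags);
 *		ret = (v->counter += i);
 *		_atomic_spin_unlock_irqrestore(v, flags);
 *
 *		return ret;
 *	}
 */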

#define arch_atomic_add_return	arch_atomic_add_return
#define arch_atomic_sub_return	arch_atomic_sub_return
#define arch_atomic_fetch_add	arch_atomic_fetch_add
#define arch_atomic_fetch_sub	arch_atomic_fetch_sub
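
/* The self-referencing #defines above tell the generic fallback layer
 * (include/linux/atomic/atomic-arch-fallback.h) that this architecture
 * provides these operations, so no fallback versions are generated. */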

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#define arch_atomic_fetch_and	arch_atomic_fetch_and
#define arch_atomic_fetch_or	arch_atomic_fetch_or
#define arch_atomic_fetch_xor	arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_64BIT
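
/* The 64-bit operations below mirror the 32-bit ones above exactly,
 * substituting s64/atomic64_t for int/atomic_t. */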

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#define arch_atomic64_add_return	arch_atomic64_add_return
#define arch_atomic64_sub_return	arch_atomic64_sub_return
#define arch_atomic64_fetch_add		arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub		arch_atomic64_fetch_sub

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
arch_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define arch_atomic64_set_release(v, i)	arch_atomic64_set((v), (i))

static __inline__ s64
arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */
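
For context, a minimal usage sketch (an illustration assuming a kernel build context; the function and variable names are hypothetical, not from this file). The generic atomic_*() wrappers in include/linux/atomic.h ultimately resolve, through the wrapper and fallback layers, to the spinlock-backed arch_atomic_*() routines defined above:

	#include <linux/atomic.h>

	static atomic_t active_users = ATOMIC_INIT(0);

	/* Take a reference; reaches arch_atomic_add(1, &active_users)
	 * through the generic wrapper layer. */
	static void get_user_ref(void)
	{
		atomic_inc(&active_users);
	}

	/* Drop a reference; returns true when the count reaches zero.
	 * Backed here, via the fallback layer, by arch_atomic_sub_return(). */
	static int put_user_ref(void)
	{
		return atomic_dec_and_test(&active_users);
	}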
