TOMOYO Linux Cross Reference
Linux/arch/arc/include/asm/atomic64-arcv2.h

/* SPDX-License-Identifier: GPL-2.0-only */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 */

#ifndef _ASM_ARC_ATOMIC64_ARCV2_H
#define _ASM_ARC_ATOMIC64_ARCV2_H

typedef struct {
        s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(a) { (a) }

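/*
 * Editor's note (illustrative, not part of the upstream header): the
 * __aligned(8) attribute on the counter provides the 64-bit alignment
 * that LLOCKD/SCONDD require, so a counter can be declared and
 * statically initialised directly; the name below is hypothetical.
 *
 *      static atomic64_t example_refcnt = ATOMIC64_INIT(1);
 */
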
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
        s64 val;

        __asm__ __volatile__(
        "       ldd   %0, [%1]  \n"
        : "=r"(val)
        : "r"(&v->counter));

        return val;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 a)
{
        /*
         * This could have been a simple assignment in "C" but would need
         * explicit volatile. Otherwise gcc optimizers could elide the store
         * which borked atomic64 self-test
         * In the inline asm version, memory clobber needed for exact same
         * reason, to tell gcc about the store.
         *
         * This however is not needed for sibling atomic64_add() etc since both
         * load/store are explicitly done in inline asm. As long as API is used
         * for each access, gcc has no way to optimize away any load/store
         */
        __asm__ __volatile__(
        "       std   %0, [%1]  \n"
        :
        : "r"(a), "r"(&v->counter)
        : "memory");
}

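/*
 * Editor's sketch (not part of the upstream header): the "simple
 * assignment with explicit volatile" that the comment above alludes to
 * would look roughly like the helper below, using the kernel's generic
 * WRITE_ONCE() macro for the volatile store. That prevents the compiler
 * from eliding the store, but unlike the asm above it does not
 * guarantee a single 64-bit std instruction, which is why the in-tree
 * version keeps the inline asm. The name is hypothetical.
 */
static inline void example_atomic64_set(atomic64_t *v, s64 a)
{
        /* volatile store: cannot be optimised away by the compiler */
        WRITE_ONCE(v->counter, a);
}
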
#define ATOMIC64_OP(op, op1, op2)                                       \
static inline void arch_atomic64_##op(s64 a, atomic64_t *v)             \
{                                                                       \
        s64 val;                                                        \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:                             \n"                             \
        "       llockd  %0, [%1]        \n"                             \
        "       " #op1 " %L0, %L0, %L2  \n"                             \
        "       " #op2 " %H0, %H0, %H2  \n"                             \
        "       scondd   %0, [%1]       \n"                             \
        "       bnz     1b              \n"                             \
        : "=&r"(val)                                                    \
        : "r"(&v->counter), "ir"(a)                                     \
        : "cc", "memory");                                              \
}

#define ATOMIC64_OP_RETURN(op, op1, op2)                                \
static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
{                                                                       \
        s64 val;                                                        \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:                             \n"                             \
        "       llockd   %0, [%1]       \n"                             \
        "       " #op1 " %L0, %L0, %L2  \n"                             \
        "       " #op2 " %H0, %H0, %H2  \n"                             \
        "       scondd   %0, [%1]       \n"                             \
        "       bnz     1b              \n"                             \
        : [val] "=&r"(val)                                              \
        : "r"(&v->counter), "ir"(a)                                     \
        : "cc", "memory");                                              \
                                                                        \
        return val;                                                     \
}

#define arch_atomic64_add_return_relaxed        arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed        arch_atomic64_sub_return_relaxed

#define ATOMIC64_FETCH_OP(op, op1, op2)                                 \
static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
{                                                                       \
        s64 val, orig;                                                  \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:                             \n"                             \
        "       llockd   %0, [%2]       \n"                             \
        "       " #op1 " %L1, %L0, %L3  \n"                             \
        "       " #op2 " %H1, %H0, %H3  \n"                             \
        "       scondd   %1, [%2]       \n"                             \
        "       bnz     1b              \n"                             \
        : "=&r"(orig), "=&r"(val)                                       \
        : "r"(&v->counter), "ir"(a)                                     \
        : "cc", "memory");                                              \
                                                                        \
        return orig;                                                    \
}

#define arch_atomic64_fetch_add_relaxed         arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed         arch_atomic64_fetch_sub_relaxed

#define arch_atomic64_fetch_and_relaxed         arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed      arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed          arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed         arch_atomic64_fetch_xor_relaxed

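/*
 * Editor's note (not part of the upstream header): defining
 * arch_atomic64_<op>_return_relaxed / arch_atomic64_fetch_<op>_relaxed
 * to themselves signals to the generic atomic layer
 * (include/linux/atomic/atomic-arch-fallback.h) that this architecture
 * supplies the relaxed variants; the acquire, release and fully
 * ordered forms are then generated from them with the appropriate
 * barriers.
 */
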
#define ATOMIC64_OPS(op, op1, op2)                                      \
        ATOMIC64_OP(op, op1, op2)                                       \
        ATOMIC64_OP_RETURN(op, op1, op2)                                \
        ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)

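/*
 * Editor's sketch (not part of the upstream header): for reference,
 * ATOMIC64_OP(add, add.f, adc) above generates roughly the function
 * below; the name is prefixed with example_ only so this sketch does
 * not clash with the real arch_atomic64_add().
 */
static inline void example_atomic64_add(s64 a, atomic64_t *v)
{
        s64 val;

        __asm__ __volatile__(
        "1:                             \n"
        "       llockd  %0, [%1]        \n"     /* exclusive 64-bit load */
        "       add.f %L0, %L0, %L2     \n"     /* low word, sets carry */
        "       adc %H0, %H0, %H2       \n"     /* high word plus carry */
        "       scondd   %0, [%1]       \n"     /* store iff still exclusive */
        "       bnz     1b              \n"     /* retry if scondd failed */
        : "=&r"(val)
        : "r"(&v->counter), "ir"(a)
        : "cc", "memory");
}
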
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)                                      \
        ATOMIC64_OP(op, op1, op2)                                       \
        ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)

#define arch_atomic64_andnot            arch_atomic64_andnot

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64
arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
{
        s64 prev;

        smp_mb();

        __asm__ __volatile__(
        "1:     llockd  %0, [%1]        \n"
        "       brne    %L0, %L2, 2f    \n"
        "       brne    %H0, %H2, 2f    \n"
        "       scondd  %3, [%1]        \n"
        "       bnz     1b              \n"
        "2:                             \n"
        : "=&r"(prev)
        : "r"(ptr), "ir"(expected), "r"(new)
        : "cc");        /* memory clobber comes from smp_mb() */

        smp_mb();

        return prev;
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

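/*
 * Editor's sketch (not part of the upstream header): a typical
 * compare-and-swap retry loop built on the primitive above, here as a
 * hypothetical helper that atomically doubles a counter. Kernel code
 * would normally go through the generic atomic64_cmpxchg() /
 * atomic64_try_cmpxchg() wrappers rather than the arch_ primitive.
 */
static inline s64 example_atomic64_double(atomic64_t *v)
{
        s64 old = arch_atomic64_read(v);
        s64 prev;

        /* retry with the fresh value whenever another CPU won the race */
        while ((prev = arch_atomic64_cmpxchg(v, old, old * 2)) != old)
                old = prev;

        return old * 2;         /* the value that was actually stored */
}
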
static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
{
        s64 prev;

        smp_mb();

        __asm__ __volatile__(
        "1:     llockd  %0, [%1]        \n"
        "       scondd  %2, [%1]        \n"
        "       bnz     1b              \n"
        "2:                             \n"
        : "=&r"(prev)
        : "r"(ptr), "r"(new)
        : "cc");        /* memory clobber comes from smp_mb() */

        smp_mb();

        return prev;
}
#define arch_atomic64_xchg arch_atomic64_xchg

static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
        s64 val;

        smp_mb();

        __asm__ __volatile__(
        "1:     llockd  %0, [%1]        \n"
        "       sub.f   %L0, %L0, 1     # w0 - 1, set C on borrow\n"
        "       sub.c   %H0, %H0, 1     # if C set, w1 - 1\n"
        "       brlt    %H0, 0, 2f      \n"
        "       scondd  %0, [%1]        \n"
        "       bnz     1b              \n"
        "2:                             \n"
        : "=&r"(val)
        : "r"(&v->counter)
        : "cc");        /* memory clobber comes from smp_mb() */

        smp_mb();

        return val;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
        s64 old, temp;

        smp_mb();

        __asm__ __volatile__(
        "1:     llockd  %0, [%2]        \n"
        "       brne    %L0, %L4, 2f    # continue to add since v != u \n"
        "       breq.d  %H0, %H4, 3f    # return since v == u \n"
        "2:                             \n"
        "       add.f   %L1, %L0, %L3   \n"
        "       adc     %H1, %H0, %H3   \n"
        "       scondd  %1, [%2]        \n"
        "       bnz     1b              \n"
        "3:                             \n"
        : "=&r"(old), "=&r" (temp)
        : "r"(&v->counter), "r"(a), "r"(u)
        : "cc");        /* memory clobber comes from smp_mb() */

        smp_mb();

        return old;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

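/*
 * Editor's sketch (not part of the upstream header): the generic
 * atomic layer derives atomic64_add_unless() from this primitive
 * roughly as the helper below, i.e. the add happened iff the value
 * observed was not @u. Assumes bool from <linux/types.h>; the name is
 * hypothetical.
 */
static inline bool example_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
        return arch_atomic64_fetch_add_unless(v, a, u) != u;
}
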
#endif
