
TOMOYO Linux Cross Reference
Linux/arch/hexagon/include/asm/atomic.h


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*  Normal writes in our arch don't clear lock reservations */

static inline void arch_atomic_set(atomic_t *v, int new)
{
        asm volatile(
                "1:     r6 = memw_locked(%0);\n"
                "       memw_locked(%0,p0) = %1;\n"
                "       if (!P0) jump 1b;\n"
                :
                : "r" (&v->counter), "r" (new)
                : "memory", "p0", "r6"
        );
}

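/*
 * [Annotation -- not part of the original header file]
 *
 * memw_locked is Hexagon's load-locked/store-conditional pair: the load
 * form takes out a reservation on the word, and the predicated store
 * only succeeds (setting p0) while that reservation is still held, so
 * the code loops back to 1: and retries on failure.  Because normal
 * stores do not clear reservations on this architecture (see the
 * comment above), arch_atomic_set() presumably goes through the same
 * locked sequence so that a concurrent locked read-modify-write cannot
 * complete against a stale value and silently overwrite the new one.
 */
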
#define arch_atomic_set_release(v, i)   arch_atomic_set((v), (i))

#define arch_atomic_read(v)             READ_ONCE((v)->counter)

#define ATOMIC_OP(op)                                                   \
static inline void arch_atomic_##op(int i, atomic_t *v)                 \
{                                                                       \
        int output;                                                     \
                                                                        \
        __asm__ __volatile__ (                                          \
                "1:     %0 = memw_locked(%1);\n"                        \
                "       %0 = "#op "(%0,%2);\n"                          \
                "       memw_locked(%1,P3)=%0;\n"                       \
                "       if (!P3) jump 1b;\n"                            \
                : "=&r" (output)                                        \
                : "r" (&v->counter), "r" (i)                            \
                : "memory", "p3"                                        \
        );                                                              \
}

#define ATOMIC_OP_RETURN(op)                                            \
static inline int arch_atomic_##op##_return(int i, atomic_t *v)         \
{                                                                       \
        int output;                                                     \
                                                                        \
        __asm__ __volatile__ (                                          \
                "1:     %0 = memw_locked(%1);\n"                        \
                "       %0 = "#op "(%0,%2);\n"                          \
                "       memw_locked(%1,P3)=%0;\n"                       \
                "       if (!P3) jump 1b;\n"                            \
                : "=&r" (output)                                        \
                : "r" (&v->counter), "r" (i)                            \
                : "memory", "p3"                                        \
        );                                                              \
        return output;                                                  \
}

#define ATOMIC_FETCH_OP(op)                                             \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)            \
{                                                                       \
        int output, val;                                                \
                                                                        \
        __asm__ __volatile__ (                                          \
                "1:     %0 = memw_locked(%2);\n"                        \
                "       %1 = "#op "(%0,%3);\n"                          \
                "       memw_locked(%2,P3)=%1;\n"                       \
                "       if (!P3) jump 1b;\n"                            \
                : "=&r" (output), "=&r" (val)                           \
                : "r" (&v->counter), "r" (i)                            \
                : "memory", "p3"                                        \
        );                                                              \
        return output;                                                  \
}

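/*
 * [Annotation -- not part of the original header file]
 *
 * For reference, ATOMIC_OP(add) expands to roughly the following, with
 * "add" pasted into the function name and stringized into the asm:
 *
 *      static inline void arch_atomic_add(int i, atomic_t *v)
 *      {
 *              int output;
 *
 *              __asm__ __volatile__ (
 *                      "1:     %0 = memw_locked(%1);\n"
 *                      "       %0 = add(%0,%2);\n"
 *                      "       memw_locked(%1,P3)=%0;\n"
 *                      "       if (!P3) jump 1b;\n"
 *                      : "=&r" (output)
 *                      : "r" (&v->counter), "r" (i)
 *                      : "memory", "p3"
 *              );
 *      }
 */
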
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#define arch_atomic_add_return                  arch_atomic_add_return
#define arch_atomic_sub_return                  arch_atomic_sub_return
#define arch_atomic_fetch_add                   arch_atomic_fetch_add
#define arch_atomic_fetch_sub                   arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#define arch_atomic_fetch_and                   arch_atomic_fetch_and
#define arch_atomic_fetch_or                    arch_atomic_fetch_or
#define arch_atomic_fetch_xor                   arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

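/*
 * [Annotation -- not part of the original header file]
 *
 * The instantiations above generate:
 *
 *      arch_atomic_add(), arch_atomic_add_return(), arch_atomic_fetch_add()
 *      arch_atomic_sub(), arch_atomic_sub_return(), arch_atomic_fetch_sub()
 *      arch_atomic_and(),                           arch_atomic_fetch_and()
 *      arch_atomic_or(),                            arch_atomic_fetch_or()
 *      arch_atomic_xor(),                           arch_atomic_fetch_xor()
 *
 * i.e. the bitwise ops get no *_return form here.  The self-referential
 * #defines appear to follow the usual convention that lets the generic
 * atomic fallback headers detect (via #ifdef) which operations the
 * architecture provides and generate fallbacks only for the rest.
 */
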
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int __oldval;
        register int tmp;

        asm volatile(
                "1:     %0 = memw_locked(%2);"
                "       {"
                "               p3 = cmp.eq(%0, %4);"
                "               if (p3.new) jump:nt 2f;"
                "               %1 = add(%0, %3);"
                "       }"
                "       memw_locked(%2, p3) = %1;"
                "       {"
                "               if (!p3) jump 1b;"
                "       }"
                "2:"
                : "=&r" (__oldval), "=&r" (tmp)
                : "r" (v), "r" (a), "r" (u)
                : "memory", "p3"
        );
        return __oldval;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

#endif

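As a reading aid, the following is a minimal, non-atomic C model of what
arch_atomic_fetch_add_unless() above computes. The function name and the
standalone form are illustrative only; the real routine performs the whole
sequence under a memw_locked reservation and retries until the conditional
store succeeds.

/* Illustrative semantic model only -- ignores atomicity and the retry loop. */
static int fetch_add_unless_model(int *counter, int a, int u)
{
        int old = *counter;             /* "%0 = memw_locked(%2)"            */

        if (old != u)                   /* "p3 = cmp.eq(%0, %4)" false path  */
                *counter = old + a;     /* "%1 = add(%0, %3)" + locked store */

        return old;                     /* the pre-add value is returned     */
}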
