~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/powerpc/include/asm/paravirt.h

Version: ~ [ linux-6.11-rc3 ] ~ [ linux-6.10.4 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.45 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.104 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.164 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.223 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.281 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.319 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

Diff markup

Differences between /arch/powerpc/include/asm/paravirt.h (Architecture ppc) and /arch/i386/include/asm-i386/paravirt.h (Architecture i386)


  1 /* SPDX-License-Identifier: GPL-2.0-or-later *      1 
  2 #ifndef _ASM_POWERPC_PARAVIRT_H                   
  3 #define _ASM_POWERPC_PARAVIRT_H                   
  4                                                   
  5 #include <linux/jump_label.h>                     
  6 #include <asm/smp.h>                              
  7 #ifdef CONFIG_PPC64                               
  8 #include <asm/paca.h>                             
  9 #include <asm/lppaca.h>                           
 10 #include <asm/hvcall.h>                           
 11 #endif                                            
 12                                                   
 13 #ifdef CONFIG_PPC_SPLPAR                          
 14 #include <linux/smp.h>                            
 15 #include <asm/kvm_guest.h>                        
 16 #include <asm/cputhreads.h>                       
 17                                                   
 18 DECLARE_STATIC_KEY_FALSE(shared_processor);       
 19                                                   
 20 static inline bool is_shared_processor(void)      
 21 {                                                 
 22         return static_branch_unlikely(&shared_    
 23 }                                                 
 24                                                   
 25 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING            
 26 extern struct static_key paravirt_steal_enable    
 27 extern struct static_key paravirt_steal_rq_ena    
 28                                                   
 29 u64 pseries_paravirt_steal_clock(int cpu);        
 30                                                   
 31 static inline u64 paravirt_steal_clock(int cpu    
 32 {                                                 
 33         return pseries_paravirt_steal_clock(cp    
 34 }                                                 
 35 #endif                                            
 36                                                   
 37 /* If bit 0 is set, the cpu has been ceded, co    
 38 static inline u32 yield_count_of(int cpu)         
 39 {                                                 
 40         __be32 yield_count = READ_ONCE(lppaca_    
 41         return be32_to_cpu(yield_count);          
 42 }                                                 
 43                                                   
 44 /*                                                
 45  * Spinlock code confers and prods, so don't t    
 46  * tracing code takes spinlocks which can caus    
 47  *                                                
 48  * These calls are made while the lock is not     
 49  * it can not acquire the lock, and unlock slo    
 50  * yielded). So this may not be a problem for     
 51  * tracing does not technically recurse on the    
 52  *                                                
 53  * However the queued spin lock contended path    
 54  * H_CONFER hcall is made after the task has q    
 55  * recursing on that lock will cause the task     
 56  * first instance (or worse: queued spinlocks     
 57  * never waits on more than one spinlock, so s    
 58  * corruption in the lock code).                  
 59  */                                               
 60 static inline void yield_to_preempted(int cpu,    
 61 {                                                 
 62         plpar_hcall_norets_notrace(H_CONFER, g    
 63 }                                                 
 64                                                   
 65 static inline void prod_cpu(int cpu)              
 66 {                                                 
 67         plpar_hcall_norets_notrace(H_PROD, get    
 68 }                                                 
 69                                                   
 70 static inline void yield_to_any(void)             
 71 {                                                 
 72         plpar_hcall_norets_notrace(H_CONFER, -    
 73 }                                                 
 74                                                   
 75 static inline bool is_vcpu_idle(int vcpu)         
 76 {                                                 
 77         return lppaca_of(vcpu).idle;              
 78 }                                                 
 79                                                   
 80 static inline bool vcpu_is_dispatched(int vcpu    
 81 {                                                 
 82         /*                                        
 83          * This is the yield_count.  An "odd"     
 84          * the processor is yielded (either be    
 85          * hypervisor preempt).  An even value    
 86          * currently executing.                   
 87          */                                       
 88         return (!(yield_count_of(vcpu) & 1));     
 89 }                                                 
 90 #else                                             
 91 static inline bool is_shared_processor(void)      
 92 {                                                 
 93         return false;                             
 94 }                                                 
 95                                                   
 96 static inline u32 yield_count_of(int cpu)         
 97 {                                                 
 98         return 0;                                 
 99 }                                                 
100                                                   
101 extern void ___bad_yield_to_preempted(void);      
102 static inline void yield_to_preempted(int cpu,    
103 {                                                 
104         ___bad_yield_to_preempted(); /* This w    
105 }                                                 
106                                                   
107 extern void ___bad_yield_to_any(void);            
108 static inline void yield_to_any(void)             
109 {                                                 
110         ___bad_yield_to_any(); /* This would b    
111 }                                                 
112                                                   
113 extern void ___bad_prod_cpu(void);                
114 static inline void prod_cpu(int cpu)              
115 {                                                 
116         ___bad_prod_cpu(); /* This would be a     
117 }                                                 
118                                                   
119 static inline bool is_vcpu_idle(int vcpu)         
120 {                                                 
121         return false;                             
122 }                                                 
123 static inline bool vcpu_is_dispatched(int vcpu    
124 {                                                 
125         return true;                              
126 }                                                 
127 #endif                                            
128                                                   
129 #define vcpu_is_preempted vcpu_is_preempted       
130 static inline bool vcpu_is_preempted(int cpu)     
131 {                                                 
132         /*                                        
133          * The dispatch/yield bit alone is an     
134          * whether the hypervisor has dispatch    
135          * processor. When it is clear, @cpu i    
136          * But when it is set, it means only t    
137          * other conditions. So we check other    
138          * @cpu first, resorting to the yield     
139          */                                       
140                                                   
141         /*                                        
142          * Hypervisor preemption isn't possibl    
143          * mode by definition.                    
144          */                                       
145         if (!is_shared_processor())               
146                 return false;                     
147                                                   
148         /*                                        
149          * If the hypervisor has dispatched th    
150          * processor, then the target CPU is d    
151          */                                       
152         if (vcpu_is_dispatched(cpu))              
153                 return false;                     
154                                                   
155         /*                                        
156          * if the target CPU is not dispatched    
157          * has not marked the CPU idle, then i    
158          */                                       
159         if (!is_vcpu_idle(cpu))                   
160                 return true;                      
161                                                   
162 #ifdef CONFIG_PPC_SPLPAR                          
163         if (!is_kvm_guest()) {                    
164                 int first_cpu, i;                 
165                                                   
166                 /*                                
167                  * The result of vcpu_is_preem    
168                  * speculative way, and is alw    
169                  * by events internal and exte    
170                  * be called in preemptable co    
171                  * we're not accessing per-cpu    
172                  * race destructively with Lin    
173                  * migration, and callers can     
174                  * error introduced by samplin    
175                  * pinning the task to it. So     
176                  * raw_smp_processor_id() here    
177                  * warnings that can arise fro    
178                  * in arbitrary contexts.         
179                  */                               
180                 first_cpu = cpu_first_thread_s    
181                                                   
182                 /*                                
183                  * The PowerVM hypervisor disp    
184                  * basis. So we know that a th    
185                  * cannot have been preempted     
186                  * has called H_CONFER, which     
187                  */                               
188                 if (cpu_first_thread_sibling(c    
189                         return false;             
190                                                   
191                 /*                                
192                  * The specific target CPU was    
193                  * then also check all other c    
194                  * because it does core schedu    
195                  * of the core getting preempt    
196                  * other vcpus can also be con    
197                  */                               
198                 first_cpu = cpu_first_thread_s    
199                 for (i = first_cpu; i < first_    
200                         if (i == cpu)             
201                                 continue;         
202                         if (vcpu_is_dispatched    
203                                 return false;     
204                         if (!is_vcpu_idle(i))     
205                                 return true;      
206                 }                                 
207         }                                         
208 #endif                                            
209                                                   
210         /*                                        
211          * None of the threads in target CPU's    
212          * them were preempted too. Hence assu    
213          * non-preempted.                         
214          */                                       
215         return false;                             
216 }                                                 
217                                                   
218 static inline bool pv_is_native_spin_unlock(vo    
219 {                                                 
220         return !is_shared_processor();            
221 }                                                 
222                                                   
223 #endif /* _ASM_POWERPC_PARAVIRT_H */              
224                                                   

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php