~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/arm/include/asm/mcpm.h

Version: ~ [ linux-6.12-rc7 ] ~ [ linux-6.11.7 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.60 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.116 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.171 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.229 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.285 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.323 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.12 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

Diff markup

Differences between /arch/arm/include/asm/mcpm.h (Architecture sparc) and /arch/mips/include/asm-mips/mcpm.h (Architecture mips)


  1 /* SPDX-License-Identifier: GPL-2.0-only */         1 
  2 /*                                                
  3  * arch/arm/include/asm/mcpm.h                    
  4  *                                                
  5  * Created by:  Nicolas Pitre, April 2012         
  6  * Copyright:   (C) 2012-2013  Linaro Limited     
  7  */                                               
  8                                                   
  9 #ifndef MCPM_H                                    
 10 #define MCPM_H                                    
 11                                                   
 12 /*                                                
 13  * Maximum number of possible clusters / CPUs     
 14  *                                                
 15  * This should be sufficient for quite a while    
 16  * (assembly) code simpler.  When this starts     
 17  * to consider dynamic allocation.                
 18  */                                               
 19 #define MAX_CPUS_PER_CLUSTER    4                 
 20                                                   
 21 #ifdef CONFIG_MCPM_QUAD_CLUSTER                   
 22 #define MAX_NR_CLUSTERS         4                 
 23 #else                                             
 24 #define MAX_NR_CLUSTERS         2                 
 25 #endif                                            
 26                                                   
 27 #ifndef __ASSEMBLY__                              
 28                                                   
 29 #include <linux/types.h>                          
 30 #include <asm/cacheflush.h>                       
 31                                                   
 32 /*                                                
 33  * Platform specific code should use this symb    
 34  * entry location for processors to use when r    
 35  */                                               
 36 extern void mcpm_entry_point(void);               
 37                                                   
 38 /*                                                
 39  * This is used to indicate where the given CP    
 40  * branch once it is ready to re-enter the ker    
 41  * should be gated.  A gated CPU is held in a     
 42  * becomes non NULL.                              
 43  */                                               
 44 void mcpm_set_entry_vector(unsigned cpu, unsig    
 45                                                   
 46 /*                                                
 47  * This sets an early poke i.e a value to be p    
 48  * from very early assembly code before the CP    
 49  * address must be physical, and if 0 then not    
 50  */                                               
 51 void mcpm_set_early_poke(unsigned cpu, unsigne    
 52                          unsigned long poke_ph    
 53                                                   
 54 /*                                                
 55  * CPU/cluster power operations API for higher    
 56  */                                               
 57                                                   
 58 /**                                               
 59  * mcpm_is_available - returns whether MCPM is    
 60  *                                                
 61  * This returns true or false accordingly.        
 62  */                                               
 63 bool mcpm_is_available(void);                     
 64                                                   
 65 /**                                               
 66  * mcpm_cpu_power_up - make given CPU in given    
 67  *                                                
 68  * @cpu: CPU number within given cluster          
 69  * @cluster: cluster number for the CPU           
 70  *                                                
 71  * The identified CPU is brought out of reset.    
 72  * down then it is brought up as well, taking     
 73  * in the cluster run, and ensuring appropriat    
 74  *                                                
 75  * Caller must ensure the appropriate entry ve    
 76  * mcpm_set_entry_vector() prior to calling th    
 77  *                                                
 78  * This must be called in a sleepable context.    
 79  * is strongly encouraged to return early and     
 80  * asynchronously, especially when significant    
 81  *                                                
 82  * If the operation cannot be performed then a    
 83  */                                               
 84 int mcpm_cpu_power_up(unsigned int cpu, unsign    
 85                                                   
 86 /**                                               
 87  * mcpm_cpu_power_down - power the calling CPU    
 88  *                                                
 89  * The calling CPU is powered down.               
 90  *                                                
 91  * If this CPU is found to be the "last man st    
 92  * then the cluster is prepared for power-down    
 93  *                                                
 94  * This must be called with interrupts disable    
 95  *                                                
 96  * On success this does not return.  Re-entry     
 97  * via mcpm_entry_point.                          
 98  *                                                
 99  * This will return if mcpm_platform_register(    
100  * previously in which case the caller should     
101  *                                                
102  * On success, the CPU is not guaranteed to be    
103  * mcpm_wait_for_cpu_powerdown() subsequently     
104  * specified cpu.  Until then, other CPUs shou    
105  * trash memory the target CPU might be execut    
106  */                                               
107 void mcpm_cpu_power_down(void);                   
108                                                   
109 /**                                               
110  * mcpm_wait_for_cpu_powerdown - wait for a sp    
111  *      make sure it is powered off               
112  *                                                
113  * @cpu: CPU number within given cluster          
114  * @cluster: cluster number for the CPU           
115  *                                                
116  * Call this function to ensure that a pending    
117  * effect and the CPU is safely parked before     
118  * operations that may affect the CPU (such as    
119  * kernel text).                                  
120  *                                                
121  * It is *not* necessary to call this function    
122  * serialise a pending powerdown with mcpm_cpu    
123  * event.                                         
124  *                                                
125  * Do not call this function unless the specif    
126  * called mcpm_cpu_power_down() or has committ    
127  *                                                
128  * @return:                                       
129  *      - zero if the CPU is in a safely parke    
130  *      - nonzero otherwise (e.g., timeout)       
131  */                                               
132 int mcpm_wait_for_cpu_powerdown(unsigned int c    
133                                                   
134 /**                                               
135  * mcpm_cpu_suspend - bring the calling CPU in    
136  *                                                
137  * The calling CPU is suspended.  This is simi    
138  * except for possible extra platform specific    
139  * an asynchronous wake-up e.g. with a pending    
140  *                                                
141  * If this CPU is found to be the "last man st    
142  * then the cluster may be prepared for power-    
143  *                                                
144  * This must be called with interrupts disable    
145  *                                                
146  * On success this does not return.  Re-entry     
147  * via mcpm_entry_point.                          
148  *                                                
149  * This will return if mcpm_platform_register(    
150  * previously in which case the caller should     
151  */                                               
152 void mcpm_cpu_suspend(void);                      
153                                                   
154 /**                                               
155  * mcpm_cpu_powered_up - housekeeping workafte    
156  *                                                
157  * This lets the platform specific backend cod    
158  * work.  This must be called by the newly act    
159  * fully operational in kernel space, before i    
160  *                                                
161  * If the operation cannot be performed then a    
162  */                                               
163 int mcpm_cpu_powered_up(void);                    
164                                                   
165 /*                                                
166  * Platform specific callbacks used in the imp    
167  *                                                
168  * cpu_powerup:                                   
169  * Make given CPU runable. Called with MCPM lo    
170  * The given cluster is assumed to be set up (    
171  * been called beforehand). Must return 0 for     
172  *                                                
173  * cluster_powerup:                               
174  * Set up power for given cluster. Called with    
175  * disabled. Called before first cpu_powerup w    
176  * return 0 for success or negative error code    
177  *                                                
178  * cpu_suspend_prepare:                           
179  * Special suspend configuration. Called on ta    
180  * and IRQs disabled. This callback is optiona    
181  * before cpu_powerdown_prepare.                  
182  *                                                
183  * cpu_powerdown_prepare:                         
184  * Configure given CPU for power down. Called     
185  * held and IRQs disabled. Power down must be     
186  *                                                
187  * cluster_powerdown_prepare:                     
188  * Configure given cluster for power down. Cal    
189  * cluster with MCPM lock held and IRQs disabl    
190  * for each CPU in the cluster has happened wh    
191  *                                                
192  * cpu_cache_disable:                             
193  * Clean and disable CPU level cache for the c    
194  * disabled only. The CPU is no longer cache c    
195  * system when this returns.                      
196  *                                                
197  * cluster_cache_disable:                         
198  * Clean and disable the cluster wide cache as    
199  * for the calling CPU. No call to cpu_cache_d    
200  * CPU. Called with IRQs disabled and only whe    
201  * with their own cpu_cache_disable. The clust    
202  * with the rest of the system when this retur    
203  *                                                
204  * cpu_is_up:                                     
205  * Called on given CPU after it has been power    
206  * is held and IRQs disabled. This callback is    
207  *                                                
208  * cluster_is_up:                                 
209  * Called by the first CPU to be powered up or    
210  * The MCPM lock is held and IRQs disabled. Th    
211  * provided, it is called before cpu_is_up for    
212  *                                                
213  * wait_for_powerdown:                            
214  * Wait until given CPU is powered down. This     
215  * Some reasonable timeout must be considered.    
216  * negative error code.                           
217  */                                               
218 struct mcpm_platform_ops {                        
219         int (*cpu_powerup)(unsigned int cpu, u    
220         int (*cluster_powerup)(unsigned int cl    
221         void (*cpu_suspend_prepare)(unsigned i    
222         void (*cpu_powerdown_prepare)(unsigned    
223         void (*cluster_powerdown_prepare)(unsi    
224         void (*cpu_cache_disable)(void);          
225         void (*cluster_cache_disable)(void);      
226         void (*cpu_is_up)(unsigned int cpu, un    
227         void (*cluster_is_up)(unsigned int clu    
228         int (*wait_for_powerdown)(unsigned int    
229 };                                                
230                                                   
231 /**                                               
232  * mcpm_platform_register - register platform     
233  *                                                
234  * @ops: mcpm_platform_ops structure to regist    
235  *                                                
236  * An error is returned if the registration ha    
237  */                                               
238 int __init mcpm_platform_register(const struct    
239                                                   
240 /**                                               
241  * mcpm_sync_init - Initialize the cluster syn    
242  *                                                
243  * @power_up_setup: platform specific function    
244  *                  early CPU/cluster bringup     
245  *                                                
246  * This prepares memory used by vlocks and the    
247  * across CPUs that may have their caches acti    
248  * called only after a successful call to mcpm    
249  *                                                
250  * The power_up_setup argument is a pointer to    
251  * the MMU and caches are still disabled durin    
252  * available. The affinity level passed to tha    
253  * resource that needs to be initialized (e.g.    
254  * CPU level).  Proper exclusion mechanisms ar    
255  * point.                                         
256  */                                               
257 int __init mcpm_sync_init(                        
258         void (*power_up_setup)(unsigned int af    
259                                                   
260 /**                                               
261  * mcpm_loopback - make a run through the MCPM    
262  *                                                
263  * @cache_disable: pointer to function perform    
264  *                                                
265  * This exercises the MCPM machinery by soft r    
266  * to the MCPM low-level entry code before ret    
267  * The @cache_disable function must do the nec    
268  * let the regular kernel init code turn it ba    
269  * hotplugged in. The MCPM state machine is se    
270  * initialized meaning the power_up_setup call    
271  * will be invoked for all affinity levels. Th    
272  * some resources such as enabling the CCI tha    
273  */                                               
274 int __init mcpm_loopback(void (*cache_disable)    
275                                                   
276 void __init mcpm_smp_set_ops(void);               
277                                                   
278 /*                                                
279  * Synchronisation structures for coordinating    
280  * This is private to the MCPM core code and s    
281  * When modifying this structure, make sure yo    
282  * to match.                                      
283  */                                               
284 struct mcpm_sync_struct {                         
285         /* individual CPU states */               
286         struct {                                  
287                 s8 cpu __aligned(__CACHE_WRITE    
288         } cpus[MAX_CPUS_PER_CLUSTER];             
289                                                   
290         /* cluster state */                       
291         s8 cluster __aligned(__CACHE_WRITEBACK    
292                                                   
293         /* inbound-side state */                  
294         s8 inbound __aligned(__CACHE_WRITEBACK    
295 };                                                
296                                                   
297 struct sync_struct {                              
298         struct mcpm_sync_struct clusters[MAX_N    
299 };                                                
300                                                   
301 #else                                             
302                                                   
303 /*                                                
304  * asm-offsets.h causes trouble when included     
305  * cannot be included in asm files.  Let's wor    
306  */                                               
307 #include <asm/asm-offsets.h>                      
308 #define __CACHE_WRITEBACK_GRANULE CACHE_WRITEB    
309                                                   
310 #endif /* ! __ASSEMBLY__ */                       
311                                                   
312 /* Definitions for mcpm_sync_struct */            
313 #define CPU_DOWN                0x11              
314 #define CPU_COMING_UP           0x12              
315 #define CPU_UP                  0x13              
316 #define CPU_GOING_DOWN          0x14              
317                                                   
318 #define CLUSTER_DOWN            0x21              
319 #define CLUSTER_UP              0x22              
320 #define CLUSTER_GOING_DOWN      0x23              
321                                                   
322 #define INBOUND_NOT_COMING_UP   0x31              
323 #define INBOUND_COMING_UP       0x32              
324                                                   
325 /*                                                
326  * Offsets for the mcpm_sync_struct members, f    
327  * We don't want to make them global to the ke    
328  */                                               
329 #define MCPM_SYNC_CLUSTER_CPUS  0                 
330 #define MCPM_SYNC_CPU_SIZE      __CACHE_WRITEB    
331 #define MCPM_SYNC_CLUSTER_CLUSTER \               
332         (MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CP    
333 #define MCPM_SYNC_CLUSTER_INBOUND \               
334         (MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_W    
335 #define MCPM_SYNC_CLUSTER_SIZE \                  
336         (MCPM_SYNC_CLUSTER_INBOUND + __CACHE_W    
337                                                   
338 #endif                                            
339                                                   

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php