~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/kernel/rcu/update.c

Version: ~ [ linux-6.12-rc7 ] ~ [ linux-6.11.7 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.60 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.116 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.171 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.229 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.285 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.323 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.12 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

Diff markup

Differences between /kernel/rcu/update.c (Version linux-6.12-rc7) and /kernel/rcu/update.c (Version linux-2.6.0)


  1 // SPDX-License-Identifier: GPL-2.0+                1 
  2 /*                                                
  3  * Read-Copy Update mechanism for mutual exclu    
  4  *                                                
  5  * Copyright IBM Corporation, 2001                
  6  *                                                
  7  * Authors: Dipankar Sarma <dipankar@in.ibm.co    
  8  *          Manfred Spraul <manfred@colorfulli    
  9  *                                                
 10  * Based on the original work by Paul McKenney    
 11  * and inputs from Rusty Russell, Andrea Arcan    
 12  * Papers:                                        
 13  * http://www.rdrop.com/users/paulmck/paper/rc    
 14  * http://lse.sourceforge.net/locking/rclock_O    
 15  *                                                
 16  * For detailed explanation of Read-Copy Updat    
 17  *              http://lse.sourceforge.net/loc    
 18  *                                                
 19  */                                               
 20 #include <linux/types.h>                          
 21 #include <linux/kernel.h>                         
 22 #include <linux/init.h>                           
 23 #include <linux/spinlock.h>                       
 24 #include <linux/smp.h>                            
 25 #include <linux/interrupt.h>                      
 26 #include <linux/sched/signal.h>                   
 27 #include <linux/sched/debug.h>                    
 28 #include <linux/torture.h>                        
 29 #include <linux/atomic.h>                         
 30 #include <linux/bitops.h>                         
 31 #include <linux/percpu.h>                         
 32 #include <linux/notifier.h>                       
 33 #include <linux/cpu.h>                            
 34 #include <linux/mutex.h>                          
 35 #include <linux/export.h>                         
 36 #include <linux/hardirq.h>                        
 37 #include <linux/delay.h>                          
 38 #include <linux/moduleparam.h>                    
 39 #include <linux/kthread.h>                        
 40 #include <linux/tick.h>                           
 41 #include <linux/rcupdate_wait.h>                  
 42 #include <linux/sched/isolation.h>                
 43 #include <linux/kprobes.h>                        
 44 #include <linux/slab.h>                           
 45 #include <linux/irq_work.h>                       
 46 #include <linux/rcupdate_trace.h>                 
 47                                                   
 48 #define CREATE_TRACE_POINTS                       
 49                                                   
 50 #include "rcu.h"                                  
 51                                                   
 52 #ifdef MODULE_PARAM_PREFIX                        
 53 #undef MODULE_PARAM_PREFIX                        
 54 #endif                                            
 55 #define MODULE_PARAM_PREFIX "rcupdate."           
 56                                                   
 57 #ifndef CONFIG_TINY_RCU                           
 58 module_param(rcu_expedited, int, 0444);           
 59 module_param(rcu_normal, int, 0444);              
 60 static int rcu_normal_after_boot = IS_ENABLED(    
 61 #if !defined(CONFIG_PREEMPT_RT) || defined(CON    
 62 module_param(rcu_normal_after_boot, int, 0444)    
 63 #endif                                            
 64 #endif /* #ifndef CONFIG_TINY_RCU */              
 65                                                   
 66 #ifdef CONFIG_DEBUG_LOCK_ALLOC                    
 67 /**                                               
 68  * rcu_read_lock_held_common() - might we be i    
 69  * @ret:        Best guess answer if lockdep c    
 70  *                                                
 71  * Returns true if lockdep must be ignored, in    
 72  * the best guess described below.  Otherwise     
 73  * case ``*ret`` tells the caller nothing and     
 74  * consult lockdep.                               
 75  *                                                
 76  * If CONFIG_DEBUG_LOCK_ALLOC is selected, set    
 77  * RCU-sched read-side critical section.  In a    
 78  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we ar    
 79  * critical section unless it can prove otherw    
 80  * of preemption (including disabling irqs) co    
 81  * read-side critical section.  This is useful    
 82  * that required that they be called within an    
 83  * critical section.                              
 84  *                                                
 85  * Check debug_lockdep_rcu_enabled() to preven    
 86  * and while lockdep is disabled.                 
 87  *                                                
 88  * Note that if the CPU is in the idle loop fr    
 89  * that we are in the section between ct_idle_    
 90  * then rcu_read_lock_held() sets ``*ret`` to     
 91  * rcu_read_lock().  The reason for this is th    
 92  * in such a section, considering these as in     
 93  * so such a CPU is effectively never in an RC    
 94  * regardless of what RCU primitives it invoke    
 95  * required --- we need to keep an RCU-free wi    
 96  * possibly enter into low power mode. This wa    
 97  * quiescent state to other CPUs that started     
 98  * we would delay any grace period as long as     
 99  *                                                
100  * Similarly, we avoid claiming an RCU read lo    
101  * CPU is offline.                                
102  */                                               
103 static bool rcu_read_lock_held_common(bool *re    
104 {                                                 
105         if (!debug_lockdep_rcu_enabled()) {       
106                 *ret = true;                      
107                 return true;                      
108         }                                         
109         if (!rcu_is_watching()) {                 
110                 *ret = false;                     
111                 return true;                      
112         }                                         
113         if (!rcu_lockdep_current_cpu_online())    
114                 *ret = false;                     
115                 return true;                      
116         }                                         
117         return false;                             
118 }                                                 
119                                                   
120 int rcu_read_lock_sched_held(void)                
121 {                                                 
122         bool ret;                                 
123                                                   
124         if (rcu_read_lock_held_common(&ret))      
125                 return ret;                       
126         return lock_is_held(&rcu_sched_lock_ma    
127 }                                                 
128 EXPORT_SYMBOL(rcu_read_lock_sched_held);          
129 #endif                                            
130                                                   
131 #ifndef CONFIG_TINY_RCU                           
132                                                   
133 /*                                                
134  * Should expedited grace-period primitives al    
135  * non-expedited counterparts?  Intended for u    
136  * that if the user specifies both rcu_expedit    
137  * rcu_normal wins.  (Except during the time p    
138  * when the first task is spawned until the rc    
139  * core_initcall() is invoked, at which point     
140  */                                               
141 bool rcu_gp_is_normal(void)                       
142 {                                                 
143         return READ_ONCE(rcu_normal) &&           
144                rcu_scheduler_active != RCU_SCH    
145 }                                                 
146 EXPORT_SYMBOL_GPL(rcu_gp_is_normal);              
147                                                   
148 static atomic_t rcu_async_hurry_nesting = ATOM    
149 /*                                                
150  * Should call_rcu() callbacks be processed wi    
151  * they OK being executed with arbitrary delay    
152  */                                               
153 bool rcu_async_should_hurry(void)                 
154 {                                                 
155         return !IS_ENABLED(CONFIG_RCU_LAZY) ||    
156                atomic_read(&rcu_async_hurry_ne    
157 }                                                 
158 EXPORT_SYMBOL_GPL(rcu_async_should_hurry);        
159                                                   
160 /**                                               
161  * rcu_async_hurry - Make future async RCU cal    
162  *                                                
163  * After a call to this function, future calls    
164  * will be processed in a timely fashion.         
165  */                                               
166 void rcu_async_hurry(void)                        
167 {                                                 
168         if (IS_ENABLED(CONFIG_RCU_LAZY))          
169                 atomic_inc(&rcu_async_hurry_ne    
170 }                                                 
171 EXPORT_SYMBOL_GPL(rcu_async_hurry);               
172                                                   
173 /**                                               
174  * rcu_async_relax - Make future async RCU cal    
175  *                                                
176  * After a call to this function, future calls    
177  * will be processed in a lazy fashion.           
178  */                                               
179 void rcu_async_relax(void)                        
180 {                                                 
181         if (IS_ENABLED(CONFIG_RCU_LAZY))          
182                 atomic_dec(&rcu_async_hurry_ne    
183 }                                                 
184 EXPORT_SYMBOL_GPL(rcu_async_relax);               
185                                                   
186 static atomic_t rcu_expedited_nesting = ATOMIC    
187 /*                                                
188  * Should normal grace-period primitives be ex    
189  * use within RCU.  Note that this function ta    
190  * sysfs/boot variable and rcu_scheduler_activ    
191  * as the rcu_expedite_gp() nesting.  So loopi    
192  * until rcu_gp_is_expedited() returns false i    
193  */                                               
194 bool rcu_gp_is_expedited(void)                    
195 {                                                 
196         return rcu_expedited || atomic_read(&r    
197 }                                                 
198 EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);           
199                                                   
200 /**                                               
201  * rcu_expedite_gp - Expedite future RCU grace    
202  *                                                
203  * After a call to this function, future calls    
204  * friends act as the corresponding synchroniz    
205  * had instead been called.                       
206  */                                               
207 void rcu_expedite_gp(void)                        
208 {                                                 
209         atomic_inc(&rcu_expedited_nesting);       
210 }                                                 
211 EXPORT_SYMBOL_GPL(rcu_expedite_gp);               
212                                                   
213 /**                                               
214  * rcu_unexpedite_gp - Cancel prior rcu_expedi    
215  *                                                
216  * Undo a prior call to rcu_expedite_gp().  If    
217  * rcu_expedite_gp() are undone by a subsequen    
218  * and if the rcu_expedited sysfs/boot paramet    
219  * subsequent calls to synchronize_rcu() and f    
220  * their normal non-expedited behavior.           
221  */                                               
222 void rcu_unexpedite_gp(void)                      
223 {                                                 
224         atomic_dec(&rcu_expedited_nesting);       
225 }                                                 
226 EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);             
227                                                   
228 static bool rcu_boot_ended __read_mostly;         
229                                                   
230 /*                                                
231  * Inform RCU of the end of the in-kernel boot    
232  */                                               
233 void rcu_end_inkernel_boot(void)                  
234 {                                                 
235         rcu_unexpedite_gp();                      
236         rcu_async_relax();                        
237         if (rcu_normal_after_boot)                
238                 WRITE_ONCE(rcu_normal, 1);        
239         rcu_boot_ended = true;                    
240 }                                                 
241                                                   
242 /*                                                
243  * Let rcutorture know when it is OK to turn i    
244  */                                               
245 bool rcu_inkernel_boot_has_ended(void)            
246 {                                                 
247         return rcu_boot_ended;                    
248 }                                                 
249 EXPORT_SYMBOL_GPL(rcu_inkernel_boot_has_ended)    
250                                                   
251 #endif /* #ifndef CONFIG_TINY_RCU */              
252                                                   
253 /*                                                
254  * Test each non-SRCU synchronous grace-period    
255  * useful just after a change in mode for thes    
256  * during early boot.                             
257  */                                               
258 void rcu_test_sync_prims(void)                    
259 {                                                 
260         if (!IS_ENABLED(CONFIG_PROVE_RCU))        
261                 return;                           
262         pr_info("Running RCU synchronous self     
263         synchronize_rcu();                        
264         synchronize_rcu_expedited();              
265 }                                                 
266                                                   
267 #if !defined(CONFIG_TINY_RCU)                     
268                                                   
269 /*                                                
270  * Switch to run-time mode once RCU has fully     
271  */                                               
272 static int __init rcu_set_runtime_mode(void)      
273 {                                                 
274         rcu_test_sync_prims();                    
275         rcu_scheduler_active = RCU_SCHEDULER_R    
276         kfree_rcu_scheduler_running();            
277         rcu_test_sync_prims();                    
278         return 0;                                 
279 }                                                 
280 core_initcall(rcu_set_runtime_mode);              
281                                                   
282 #endif /* #if !defined(CONFIG_TINY_RCU) */        
283                                                   
284 #ifdef CONFIG_DEBUG_LOCK_ALLOC                    
285 static struct lock_class_key rcu_lock_key;        
286 struct lockdep_map rcu_lock_map = {               
287         .name = "rcu_read_lock",                  
288         .key = &rcu_lock_key,                     
289         .wait_type_outer = LD_WAIT_FREE,          
290         .wait_type_inner = LD_WAIT_CONFIG, /*     
291 };                                                
292 EXPORT_SYMBOL_GPL(rcu_lock_map);                  
293                                                   
294 static struct lock_class_key rcu_bh_lock_key;     
295 struct lockdep_map rcu_bh_lock_map = {            
296         .name = "rcu_read_lock_bh",               
297         .key = &rcu_bh_lock_key,                  
298         .wait_type_outer = LD_WAIT_FREE,          
299         .wait_type_inner = LD_WAIT_CONFIG, /*     
300 };                                                
301 EXPORT_SYMBOL_GPL(rcu_bh_lock_map);               
302                                                   
303 static struct lock_class_key rcu_sched_lock_ke    
304 struct lockdep_map rcu_sched_lock_map = {         
305         .name = "rcu_read_lock_sched",            
306         .key = &rcu_sched_lock_key,               
307         .wait_type_outer = LD_WAIT_FREE,          
308         .wait_type_inner = LD_WAIT_SPIN,          
309 };                                                
310 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);            
311                                                   
312 // Tell lockdep when RCU callbacks are being i    
313 static struct lock_class_key rcu_callback_key;    
314 struct lockdep_map rcu_callback_map =             
315         STATIC_LOCKDEP_MAP_INIT("rcu_callback"    
316 EXPORT_SYMBOL_GPL(rcu_callback_map);              
317                                                   
318 noinstr int notrace debug_lockdep_rcu_enabled(    
319 {                                                 
320         return rcu_scheduler_active != RCU_SCH    
321                current->lockdep_recursion == 0    
322 }                                                 
323 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);     
324                                                   
325 /**                                               
326  * rcu_read_lock_held() - might we be in RCU r    
327  *                                                
328  * If CONFIG_DEBUG_LOCK_ALLOC is selected, ret    
329  * read-side critical section.  In absence of     
330  * this assumes we are in an RCU read-side cri    
331  * prove otherwise.  This is useful for debug     
332  * require that they be called within an RCU r    
333  *                                                
334  * Checks debug_lockdep_rcu_enabled() to preve    
335  * and while lockdep is disabled.                 
336  *                                                
337  * Note that rcu_read_lock() and the matching     
338  * occur in the same context, for example, it     
339  * rcu_read_unlock() in process context if the    
340  * was invoked from within an irq handler.        
341  *                                                
342  * Note that rcu_read_lock() is disallowed if     
343  * offline from an RCU perspective, so check f    
344  */                                               
345 int rcu_read_lock_held(void)                      
346 {                                                 
347         bool ret;                                 
348                                                   
349         if (rcu_read_lock_held_common(&ret))      
350                 return ret;                       
351         return lock_is_held(&rcu_lock_map);       
352 }                                                 
353 EXPORT_SYMBOL_GPL(rcu_read_lock_held);            
354                                                   
355 /**                                               
356  * rcu_read_lock_bh_held() - might we be in RC    
357  *                                                
358  * Check for bottom half being disabled, which    
359  * CONFIG_PROVE_RCU and not cases.  Note that     
360  * rcu_read_lock_bh(), but then later enables     
361  * will show the situation.  This is useful fo    
362  * that require that they be called within an     
363  * section.                                       
364  *                                                
365  * Check debug_lockdep_rcu_enabled() to preven    
366  *                                                
367  * Note that rcu_read_lock_bh() is disallowed     
368  * offline from an RCU perspective, so check f    
369  */                                               
370 int rcu_read_lock_bh_held(void)                   
371 {                                                 
372         bool ret;                                 
373                                                   
374         if (rcu_read_lock_held_common(&ret))      
375                 return ret;                       
376         return in_softirq() || irqs_disabled()    
377 }                                                 
378 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);         
379                                                   
380 int rcu_read_lock_any_held(void)                  
381 {                                                 
382         bool ret;                                 
383                                                   
384         if (rcu_read_lock_held_common(&ret))      
385                 return ret;                       
386         if (lock_is_held(&rcu_lock_map) ||        
387             lock_is_held(&rcu_bh_lock_map) ||     
388             lock_is_held(&rcu_sched_lock_map))    
389                 return 1;                         
390         return !preemptible();                    
391 }                                                 
392 EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);        
393                                                   
394 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */       
395                                                   
396 /**                                               
397  * wakeme_after_rcu() - Callback function to a    
398  * @head: Pointer to rcu_head member within rc    
399  *                                                
400  * Awaken the corresponding task now that a gr    
401  */                                               
402 void wakeme_after_rcu(struct rcu_head *head)      
403 {                                                 
404         struct rcu_synchronize *rcu;              
405                                                   
406         rcu = container_of(head, struct rcu_sy    
407         complete(&rcu->completion);               
408 }                                                 
409 EXPORT_SYMBOL_GPL(wakeme_after_rcu);              
410                                                   
411 void __wait_rcu_gp(bool checktiny, unsigned in    
412                    struct rcu_synchronize *rs_    
413 {                                                 
414         int i;                                    
415         int j;                                    
416                                                   
417         /* Initialize and register callbacks f    
418         for (i = 0; i < n; i++) {                 
419                 if (checktiny &&                  
420                     (crcu_array[i] == call_rcu    
421                         might_sleep();            
422                         continue;                 
423                 }                                 
424                 for (j = 0; j < i; j++)           
425                         if (crcu_array[j] == c    
426                                 break;            
427                 if (j == i) {                     
428                         init_rcu_head_on_stack    
429                         init_completion(&rs_ar    
430                         (crcu_array[i])(&rs_ar    
431                 }                                 
432         }                                         
433                                                   
434         /* Wait for all callbacks to be invoke    
435         for (i = 0; i < n; i++) {                 
436                 if (checktiny &&                  
437                     (crcu_array[i] == call_rcu    
438                         continue;                 
439                 for (j = 0; j < i; j++)           
440                         if (crcu_array[j] == c    
441                                 break;            
442                 if (j == i) {                     
443                         wait_for_completion_st    
444                         destroy_rcu_head_on_st    
445                 }                                 
446         }                                         
447 }                                                 
448 EXPORT_SYMBOL_GPL(__wait_rcu_gp);                 
449                                                   
450 void finish_rcuwait(struct rcuwait *w)            
451 {                                                 
452         rcu_assign_pointer(w->task, NULL);        
453         __set_current_state(TASK_RUNNING);        
454 }                                                 
455 EXPORT_SYMBOL_GPL(finish_rcuwait);                
456                                                   
457 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD              
458 void init_rcu_head(struct rcu_head *head)         
459 {                                                 
460         debug_object_init(head, &rcuhead_debug    
461 }                                                 
462 EXPORT_SYMBOL_GPL(init_rcu_head);                 
463                                                   
464 void destroy_rcu_head(struct rcu_head *head)      
465 {                                                 
466         debug_object_free(head, &rcuhead_debug    
467 }                                                 
468 EXPORT_SYMBOL_GPL(destroy_rcu_head);              
469                                                   
470 static bool rcuhead_is_static_object(void *add    
471 {                                                 
472         return true;                              
473 }                                                 
474                                                   
475 /**                                               
476  * init_rcu_head_on_stack() - initialize on-st    
477  * @head: pointer to rcu_head structure to be     
478  *                                                
479  * This function informs debugobjects of a new    
480  * has been allocated as an auto variable on t    
481  * is not required for rcu_head structures tha    
482  * that are dynamically allocated on the heap.    
483  * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD k    
484  */                                               
485 void init_rcu_head_on_stack(struct rcu_head *h    
486 {                                                 
487         debug_object_init_on_stack(head, &rcuh    
488 }                                                 
489 EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);        
490                                                   
491 /**                                               
492  * destroy_rcu_head_on_stack() - destroy on-st    
493  * @head: pointer to rcu_head structure to be     
494  *                                                
495  * This function informs debugobjects that an     
496  * is about to go out of scope.  As with init_    
497  * function is not required for rcu_head struc    
498  * defined or that are dynamically allocated o    
499  * init_rcu_head_on_stack(), this function has    
500  * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel build    
501  */                                               
502 void destroy_rcu_head_on_stack(struct rcu_head    
503 {                                                 
504         debug_object_free(head, &rcuhead_debug    
505 }                                                 
506 EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);     
507                                                   
508 const struct debug_obj_descr rcuhead_debug_des    
509         .name = "rcu_head",                       
510         .is_static_object = rcuhead_is_static_    
511 };                                                
512 EXPORT_SYMBOL_GPL(rcuhead_debug_descr);           
513 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD    
514                                                   
515 #if defined(CONFIG_TREE_RCU) || defined(CONFIG    
516 void do_trace_rcu_torture_read(const char *rcu    
517                                unsigned long s    
518                                unsigned long c    
519 {                                                 
520         trace_rcu_torture_read(rcutorturename,    
521 }                                                 
522 EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);     
523 #else                                             
524 #define do_trace_rcu_torture_read(rcutorturena    
525         do { } while (0)                          
526 #endif                                            
527                                                   
528 #if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_    
529 /* Get rcutorture access to sched_setaffinity(    
530 long torture_sched_setaffinity(pid_t pid, cons    
531 {                                                 
532         int ret;                                  
533                                                   
534         ret = sched_setaffinity(pid, in_mask);    
535         WARN_ONCE(ret, "%s: sched_setaffinity(    
536         return ret;                               
537 }                                                 
538 EXPORT_SYMBOL_GPL(torture_sched_setaffinity);     
539 #endif                                            
540                                                   
541 int rcu_cpu_stall_notifiers __read_mostly; //     
542 EXPORT_SYMBOL_GPL(rcu_cpu_stall_notifiers);       
543                                                   
544 #ifdef CONFIG_RCU_STALL_COMMON                    
545 int rcu_cpu_stall_ftrace_dump __read_mostly;      
546 module_param(rcu_cpu_stall_ftrace_dump, int, 0    
547 #ifdef CONFIG_RCU_CPU_STALL_NOTIFIER              
548 module_param(rcu_cpu_stall_notifiers, int, 044    
549 #endif // #ifdef CONFIG_RCU_CPU_STALL_NOTIFIER    
550 int rcu_cpu_stall_suppress __read_mostly; // !    
551 EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);        
552 module_param(rcu_cpu_stall_suppress, int, 0644    
553 int rcu_cpu_stall_timeout __read_mostly = CONF    
554 module_param(rcu_cpu_stall_timeout, int, 0644)    
555 int rcu_exp_cpu_stall_timeout __read_mostly =     
556 module_param(rcu_exp_cpu_stall_timeout, int, 0    
557 int rcu_cpu_stall_cputime __read_mostly = IS_E    
558 module_param(rcu_cpu_stall_cputime, int, 0644)    
559 bool rcu_exp_stall_task_details __read_mostly;    
560 module_param(rcu_exp_stall_task_details, bool,    
561 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */       
562                                                   
563 // Suppress boot-time RCU CPU stall warnings a    
564 // warnings.  Also used by rcutorture even if     
565 int rcu_cpu_stall_suppress_at_boot __read_most    
566 EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress_at_bo    
567 module_param(rcu_cpu_stall_suppress_at_boot, i    
568                                                   
569 /**                                               
570  * get_completed_synchronize_rcu - Return a pr    
571  *                                                
572  * Returns a value that will always be treated    
573  * poll_state_synchronize_rcu() as a cookie wh    
574  * completed.                                     
575  */                                               
576 unsigned long get_completed_synchronize_rcu(vo    
577 {                                                 
578         return RCU_GET_STATE_COMPLETED;           
579 }                                                 
580 EXPORT_SYMBOL_GPL(get_completed_synchronize_rc    
581                                                   
#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters.
 */
// Set rcu_self_test=1 on the kernel boot line to run the early-boot tests.
static bool rcu_self_test;
module_param(rcu_self_test, bool, 0444);

// Incremented by test_callback(); checked against the expected invocation
// count at late_initcall() time by rcu_verify_early_boot_tests().
static int rcu_self_test_counter;
591                                                   
592 static void test_callback(struct rcu_head *r)     
593 {                                                 
594         rcu_self_test_counter++;                  
595         pr_info("RCU test callback executed %d    
596 }                                                 
597                                                   
// SRCU instance used only by the early-boot self tests.
DEFINE_STATIC_SRCU(early_srcu);
// Polled grace-period cookie captured during the test and verified later.
static unsigned long early_srcu_cookie;

// Dummy object whose only purpose is to exercise kfree_rcu() at early boot.
struct early_boot_kfree_rcu {
	struct rcu_head rh;
};
604                                                   
605 static void early_boot_test_call_rcu(void)        
606 {                                                 
607         static struct rcu_head head;              
608         int idx;                                  
609         static struct rcu_head shead;             
610         struct early_boot_kfree_rcu *rhp;         
611                                                   
612         idx = srcu_down_read(&early_srcu);        
613         srcu_up_read(&early_srcu, idx);           
614         call_rcu(&head, test_callback);           
615         early_srcu_cookie = start_poll_synchro    
616         call_srcu(&early_srcu, &shead, test_ca    
617         rhp = kmalloc(sizeof(*rhp), GFP_KERNEL    
618         if (!WARN_ON_ONCE(!rhp))                  
619                 kfree_rcu(rhp, rh);               
620 }                                                 
621                                                   
622 void rcu_early_boot_tests(void)                   
623 {                                                 
624         pr_info("Running RCU self tests\n");      
625                                                   
626         if (rcu_self_test)                        
627                 early_boot_test_call_rcu();       
628         rcu_test_sync_prims();                    
629 }                                                 
630                                                   
631 static int rcu_verify_early_boot_tests(void)      
632 {                                                 
633         int ret = 0;                              
634         int early_boot_test_counter = 0;          
635                                                   
636         if (rcu_self_test) {                      
637                 early_boot_test_counter++;        
638                 rcu_barrier();                    
639                 early_boot_test_counter++;        
640                 srcu_barrier(&early_srcu);        
641                 WARN_ON_ONCE(!poll_state_synch    
642                 cleanup_srcu_struct(&early_src    
643         }                                         
644         if (rcu_self_test_counter != early_boo    
645                 WARN_ON(1);                       
646                 ret = -1;                         
647         }                                         
648                                                   
649         return ret;                               
650 }                                                 
651 late_initcall(rcu_verify_early_boot_tests);       
#else
/* Stub for kernels built without CONFIG_PROVE_RCU: no early self tests. */
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */
655                                                   
656 #include "tasks.h"                                
657                                                   
658 #ifndef CONFIG_TINY_RCU                           
659                                                   
660 /*                                                
661  * Print any significant non-default boot-time    
662  */                                               
663 void __init rcupdate_announce_bootup_oddness(v    
664 {                                                 
665         if (rcu_normal)                           
666                 pr_info("\tNo expedited grace     
667         else if (rcu_normal_after_boot)           
668                 pr_info("\tNo expedited grace     
669         else if (rcu_expedited)                   
670                 pr_info("\tAll grace periods a    
671         if (rcu_cpu_stall_suppress)               
672                 pr_info("\tRCU CPU stall warni    
673         if (rcu_cpu_stall_timeout != CONFIG_RC    
674                 pr_info("\tRCU CPU stall warni    
675         rcu_tasks_bootup_oddness();               
676 }                                                 
677                                                   
678 #endif /* #ifndef CONFIG_TINY_RCU */              
679                                                   

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php