~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/kernel/rcu/update.c

Version: ~ [ linux-6.12-rc7 ] ~ [ linux-6.11.7 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.60 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.116 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.171 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.229 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.285 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.323 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.12 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

Diff markup

Differences between /kernel/rcu/update.c (Version linux-6.12-rc7) and /kernel/rcu/update.c (Version linux-5.6.19)


  1 // SPDX-License-Identifier: GPL-2.0+                1 // SPDX-License-Identifier: GPL-2.0+
  2 /*                                                  2 /*
  3  * Read-Copy Update mechanism for mutual exclu      3  * Read-Copy Update mechanism for mutual exclusion
  4  *                                                  4  *
  5  * Copyright IBM Corporation, 2001                  5  * Copyright IBM Corporation, 2001
  6  *                                                  6  *
  7  * Authors: Dipankar Sarma <dipankar@in.ibm.co      7  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
  8  *          Manfred Spraul <manfred@colorfulli      8  *          Manfred Spraul <manfred@colorfullife.com>
  9  *                                                  9  *
 10  * Based on the original work by Paul McKenney     10  * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 11  * and inputs from Rusty Russell, Andrea Arcan     11  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 12  * Papers:                                         12  * Papers:
 13  * http://www.rdrop.com/users/paulmck/paper/rc     13  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 14  * http://lse.sourceforge.net/locking/rclock_O     14  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 15  *                                                 15  *
 16  * For detailed explanation of Read-Copy Updat     16  * For detailed explanation of Read-Copy Update mechanism see -
 17  *              http://lse.sourceforge.net/loc     17  *              http://lse.sourceforge.net/locking/rcupdate.html
 18  *                                                 18  *
 19  */                                                19  */
 20 #include <linux/types.h>                           20 #include <linux/types.h>
 21 #include <linux/kernel.h>                          21 #include <linux/kernel.h>
 22 #include <linux/init.h>                            22 #include <linux/init.h>
 23 #include <linux/spinlock.h>                        23 #include <linux/spinlock.h>
 24 #include <linux/smp.h>                             24 #include <linux/smp.h>
 25 #include <linux/interrupt.h>                       25 #include <linux/interrupt.h>
 26 #include <linux/sched/signal.h>                    26 #include <linux/sched/signal.h>
 27 #include <linux/sched/debug.h>                     27 #include <linux/sched/debug.h>
 28 #include <linux/torture.h>                     << 
 29 #include <linux/atomic.h>                          28 #include <linux/atomic.h>
 30 #include <linux/bitops.h>                          29 #include <linux/bitops.h>
 31 #include <linux/percpu.h>                          30 #include <linux/percpu.h>
 32 #include <linux/notifier.h>                        31 #include <linux/notifier.h>
 33 #include <linux/cpu.h>                             32 #include <linux/cpu.h>
 34 #include <linux/mutex.h>                           33 #include <linux/mutex.h>
 35 #include <linux/export.h>                          34 #include <linux/export.h>
 36 #include <linux/hardirq.h>                         35 #include <linux/hardirq.h>
 37 #include <linux/delay.h>                           36 #include <linux/delay.h>
 38 #include <linux/moduleparam.h>                     37 #include <linux/moduleparam.h>
 39 #include <linux/kthread.h>                         38 #include <linux/kthread.h>
 40 #include <linux/tick.h>                            39 #include <linux/tick.h>
 41 #include <linux/rcupdate_wait.h>                   40 #include <linux/rcupdate_wait.h>
 42 #include <linux/sched/isolation.h>                 41 #include <linux/sched/isolation.h>
 43 #include <linux/kprobes.h>                         42 #include <linux/kprobes.h>
 44 #include <linux/slab.h>                            43 #include <linux/slab.h>
 45 #include <linux/irq_work.h>                    << 
 46 #include <linux/rcupdate_trace.h>              << 
 47                                                    44 
 48 #define CREATE_TRACE_POINTS                        45 #define CREATE_TRACE_POINTS
 49                                                    46 
 50 #include "rcu.h"                                   47 #include "rcu.h"
 51                                                    48 
 52 #ifdef MODULE_PARAM_PREFIX                         49 #ifdef MODULE_PARAM_PREFIX
 53 #undef MODULE_PARAM_PREFIX                         50 #undef MODULE_PARAM_PREFIX
 54 #endif                                             51 #endif
 55 #define MODULE_PARAM_PREFIX "rcupdate."            52 #define MODULE_PARAM_PREFIX "rcupdate."
 56                                                    53 
 57 #ifndef CONFIG_TINY_RCU                            54 #ifndef CONFIG_TINY_RCU
 58 module_param(rcu_expedited, int, 0444);        !!  55 module_param(rcu_expedited, int, 0);
 59 module_param(rcu_normal, int, 0444);           !!  56 module_param(rcu_normal, int, 0);
 60 static int rcu_normal_after_boot = IS_ENABLED( !!  57 static int rcu_normal_after_boot;
 61 #if !defined(CONFIG_PREEMPT_RT) || defined(CON !!  58 module_param(rcu_normal_after_boot, int, 0);
 62 module_param(rcu_normal_after_boot, int, 0444) << 
 63 #endif                                         << 
 64 #endif /* #ifndef CONFIG_TINY_RCU */               59 #endif /* #ifndef CONFIG_TINY_RCU */
 65                                                    60 
 66 #ifdef CONFIG_DEBUG_LOCK_ALLOC                     61 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 67 /**                                                62 /**
 68  * rcu_read_lock_held_common() - might we be i     63  * rcu_read_lock_held_common() - might we be in RCU-sched read-side critical section?
 69  * @ret:        Best guess answer if lockdep c     64  * @ret:        Best guess answer if lockdep cannot be relied on
 70  *                                                 65  *
 71  * Returns true if lockdep must be ignored, in !!  66  * Returns true if lockdep must be ignored, in which case *ret contains
 72  * the best guess described below.  Otherwise      67  * the best guess described below.  Otherwise returns false, in which
 73  * case ``*ret`` tells the caller nothing and  !!  68  * case *ret tells the caller nothing and the caller should instead
 74  * consult lockdep.                                69  * consult lockdep.
 75  *                                                 70  *
 76  * If CONFIG_DEBUG_LOCK_ALLOC is selected, set !!  71  * If CONFIG_DEBUG_LOCK_ALLOC is selected, set *ret to nonzero iff in an
 77  * RCU-sched read-side critical section.  In a     72  * RCU-sched read-side critical section.  In absence of
 78  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we ar     73  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 79  * critical section unless it can prove otherw     74  * critical section unless it can prove otherwise.  Note that disabling
 80  * of preemption (including disabling irqs) co     75  * of preemption (including disabling irqs) counts as an RCU-sched
 81  * read-side critical section.  This is useful     76  * read-side critical section.  This is useful for debug checks in functions
 82  * that required that they be called within an     77  * that required that they be called within an RCU-sched read-side
 83  * critical section.                               78  * critical section.
 84  *                                                 79  *
 85  * Check debug_lockdep_rcu_enabled() to preven     80  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 86  * and while lockdep is disabled.                  81  * and while lockdep is disabled.
 87  *                                                 82  *
 88  * Note that if the CPU is in the idle loop fr     83  * Note that if the CPU is in the idle loop from an RCU point of view (ie:
 89  * that we are in the section between ct_idle_ !!  84  * that we are in the section between rcu_idle_enter() and rcu_idle_exit())
 90  * then rcu_read_lock_held() sets ``*ret`` to  !!  85  * then rcu_read_lock_held() sets *ret to false even if the CPU did an
 91  * rcu_read_lock().  The reason for this is th     86  * rcu_read_lock().  The reason for this is that RCU ignores CPUs that are
 92  * in such a section, considering these as in      87  * in such a section, considering these as in extended quiescent state,
 93  * so such a CPU is effectively never in an RC     88  * so such a CPU is effectively never in an RCU read-side critical section
 94  * regardless of what RCU primitives it invoke     89  * regardless of what RCU primitives it invokes.  This state of affairs is
 95  * required --- we need to keep an RCU-free wi     90  * required --- we need to keep an RCU-free window in idle where the CPU may
 96  * possibly enter into low power mode. This wa     91  * possibly enter into low power mode. This way we can notice an extended
 97  * quiescent state to other CPUs that started      92  * quiescent state to other CPUs that started a grace period. Otherwise
 98  * we would delay any grace period as long as      93  * we would delay any grace period as long as we run in the idle task.
 99  *                                                 94  *
100  * Similarly, we avoid claiming an RCU read lo     95  * Similarly, we avoid claiming an RCU read lock held if the current
101  * CPU is offline.                                 96  * CPU is offline.
102  */                                                97  */
103 static bool rcu_read_lock_held_common(bool *re     98 static bool rcu_read_lock_held_common(bool *ret)
104 {                                                  99 {
105         if (!debug_lockdep_rcu_enabled()) {       100         if (!debug_lockdep_rcu_enabled()) {
106                 *ret = true;                   !! 101                 *ret = 1;
107                 return true;                      102                 return true;
108         }                                         103         }
109         if (!rcu_is_watching()) {                 104         if (!rcu_is_watching()) {
110                 *ret = false;                  !! 105                 *ret = 0;
111                 return true;                      106                 return true;
112         }                                         107         }
113         if (!rcu_lockdep_current_cpu_online())    108         if (!rcu_lockdep_current_cpu_online()) {
114                 *ret = false;                  !! 109                 *ret = 0;
115                 return true;                      110                 return true;
116         }                                         111         }
117         return false;                             112         return false;
118 }                                                 113 }
119                                                   114 
120 int rcu_read_lock_sched_held(void)                115 int rcu_read_lock_sched_held(void)
121 {                                                 116 {
122         bool ret;                                 117         bool ret;
123                                                   118 
124         if (rcu_read_lock_held_common(&ret))      119         if (rcu_read_lock_held_common(&ret))
125                 return ret;                       120                 return ret;
126         return lock_is_held(&rcu_sched_lock_ma    121         return lock_is_held(&rcu_sched_lock_map) || !preemptible();
127 }                                                 122 }
128 EXPORT_SYMBOL(rcu_read_lock_sched_held);          123 EXPORT_SYMBOL(rcu_read_lock_sched_held);
129 #endif                                            124 #endif
130                                                   125 
131 #ifndef CONFIG_TINY_RCU                           126 #ifndef CONFIG_TINY_RCU
132                                                   127 
133 /*                                                128 /*
134  * Should expedited grace-period primitives al    129  * Should expedited grace-period primitives always fall back to their
135  * non-expedited counterparts?  Intended for u    130  * non-expedited counterparts?  Intended for use within RCU.  Note
136  * that if the user specifies both rcu_expedit    131  * that if the user specifies both rcu_expedited and rcu_normal, then
137  * rcu_normal wins.  (Except during the time p    132  * rcu_normal wins.  (Except during the time period during boot from
138  * when the first task is spawned until the rc    133  * when the first task is spawned until the rcu_set_runtime_mode()
139  * core_initcall() is invoked, at which point     134  * core_initcall() is invoked, at which point everything is expedited.)
140  */                                               135  */
141 bool rcu_gp_is_normal(void)                       136 bool rcu_gp_is_normal(void)
142 {                                                 137 {
143         return READ_ONCE(rcu_normal) &&           138         return READ_ONCE(rcu_normal) &&
144                rcu_scheduler_active != RCU_SCH    139                rcu_scheduler_active != RCU_SCHEDULER_INIT;
145 }                                                 140 }
146 EXPORT_SYMBOL_GPL(rcu_gp_is_normal);              141 EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
147                                                   142 
148 static atomic_t rcu_async_hurry_nesting = ATOM << 
149 /*                                             << 
150  * Should call_rcu() callbacks be processed wi << 
151  * they OK being executed with arbitrary delay << 
152  */                                            << 
153 bool rcu_async_should_hurry(void)              << 
154 {                                              << 
155         return !IS_ENABLED(CONFIG_RCU_LAZY) || << 
156                atomic_read(&rcu_async_hurry_ne << 
157 }                                              << 
158 EXPORT_SYMBOL_GPL(rcu_async_should_hurry);     << 
159                                                << 
160 /**                                            << 
161  * rcu_async_hurry - Make future async RCU cal << 
162  *                                             << 
163  * After a call to this function, future calls << 
164  * will be processed in a timely fashion.      << 
165  */                                            << 
166 void rcu_async_hurry(void)                     << 
167 {                                              << 
168         if (IS_ENABLED(CONFIG_RCU_LAZY))       << 
169                 atomic_inc(&rcu_async_hurry_ne << 
170 }                                              << 
171 EXPORT_SYMBOL_GPL(rcu_async_hurry);            << 
172                                                << 
173 /**                                            << 
174  * rcu_async_relax - Make future async RCU cal << 
175  *                                             << 
176  * After a call to this function, future calls << 
177  * will be processed in a lazy fashion.        << 
178  */                                            << 
179 void rcu_async_relax(void)                     << 
180 {                                              << 
181         if (IS_ENABLED(CONFIG_RCU_LAZY))       << 
182                 atomic_dec(&rcu_async_hurry_ne << 
183 }                                              << 
184 EXPORT_SYMBOL_GPL(rcu_async_relax);            << 
185                                                << 
186 static atomic_t rcu_expedited_nesting = ATOMIC    143 static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
                                                   >> 144 
187 /*                                                145 /*
188  * Should normal grace-period primitives be ex    146  * Should normal grace-period primitives be expedited?  Intended for
189  * use within RCU.  Note that this function ta    147  * use within RCU.  Note that this function takes the rcu_expedited
190  * sysfs/boot variable and rcu_scheduler_activ    148  * sysfs/boot variable and rcu_scheduler_active into account as well
191  * as the rcu_expedite_gp() nesting.  So loopi    149  * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
192  * until rcu_gp_is_expedited() returns false i    150  * until rcu_gp_is_expedited() returns false is a -really- bad idea.
193  */                                               151  */
194 bool rcu_gp_is_expedited(void)                    152 bool rcu_gp_is_expedited(void)
195 {                                                 153 {
196         return rcu_expedited || atomic_read(&r    154         return rcu_expedited || atomic_read(&rcu_expedited_nesting);
197 }                                                 155 }
198 EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);           156 EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
199                                                   157 
200 /**                                               158 /**
201  * rcu_expedite_gp - Expedite future RCU grace    159  * rcu_expedite_gp - Expedite future RCU grace periods
202  *                                                160  *
203  * After a call to this function, future calls    161  * After a call to this function, future calls to synchronize_rcu() and
204  * friends act as the corresponding synchroniz    162  * friends act as the corresponding synchronize_rcu_expedited() function
205  * had instead been called.                       163  * had instead been called.
206  */                                               164  */
207 void rcu_expedite_gp(void)                        165 void rcu_expedite_gp(void)
208 {                                                 166 {
209         atomic_inc(&rcu_expedited_nesting);       167         atomic_inc(&rcu_expedited_nesting);
210 }                                                 168 }
211 EXPORT_SYMBOL_GPL(rcu_expedite_gp);               169 EXPORT_SYMBOL_GPL(rcu_expedite_gp);
212                                                   170 
213 /**                                               171 /**
214  * rcu_unexpedite_gp - Cancel prior rcu_expedi    172  * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
215  *                                                173  *
216  * Undo a prior call to rcu_expedite_gp().  If    174  * Undo a prior call to rcu_expedite_gp().  If all prior calls to
217  * rcu_expedite_gp() are undone by a subsequen    175  * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
218  * and if the rcu_expedited sysfs/boot paramet    176  * and if the rcu_expedited sysfs/boot parameter is not set, then all
219  * subsequent calls to synchronize_rcu() and f    177  * subsequent calls to synchronize_rcu() and friends will return to
220  * their normal non-expedited behavior.           178  * their normal non-expedited behavior.
221  */                                               179  */
222 void rcu_unexpedite_gp(void)                      180 void rcu_unexpedite_gp(void)
223 {                                                 181 {
224         atomic_dec(&rcu_expedited_nesting);       182         atomic_dec(&rcu_expedited_nesting);
225 }                                                 183 }
226 EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);             184 EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
227                                                   185 
228 static bool rcu_boot_ended __read_mostly;      << 
229                                                << 
230 /*                                                186 /*
231  * Inform RCU of the end of the in-kernel boot    187  * Inform RCU of the end of the in-kernel boot sequence.
232  */                                               188  */
233 void rcu_end_inkernel_boot(void)                  189 void rcu_end_inkernel_boot(void)
234 {                                                 190 {
235         rcu_unexpedite_gp();                      191         rcu_unexpedite_gp();
236         rcu_async_relax();                     << 
237         if (rcu_normal_after_boot)                192         if (rcu_normal_after_boot)
238                 WRITE_ONCE(rcu_normal, 1);        193                 WRITE_ONCE(rcu_normal, 1);
239         rcu_boot_ended = true;                 << 
240 }                                                 194 }
241                                                   195 
242 /*                                             << 
243  * Let rcutorture know when it is OK to turn i << 
244  */                                            << 
245 bool rcu_inkernel_boot_has_ended(void)         << 
246 {                                              << 
247         return rcu_boot_ended;                 << 
248 }                                              << 
249 EXPORT_SYMBOL_GPL(rcu_inkernel_boot_has_ended) << 
250                                                << 
251 #endif /* #ifndef CONFIG_TINY_RCU */              196 #endif /* #ifndef CONFIG_TINY_RCU */
252                                                   197 
253 /*                                                198 /*
254  * Test each non-SRCU synchronous grace-period    199  * Test each non-SRCU synchronous grace-period wait API.  This is
255  * useful just after a change in mode for thes    200  * useful just after a change in mode for these primitives, and
256  * during early boot.                             201  * during early boot.
257  */                                               202  */
258 void rcu_test_sync_prims(void)                    203 void rcu_test_sync_prims(void)
259 {                                                 204 {
260         if (!IS_ENABLED(CONFIG_PROVE_RCU))        205         if (!IS_ENABLED(CONFIG_PROVE_RCU))
261                 return;                           206                 return;
262         pr_info("Running RCU synchronous self  << 
263         synchronize_rcu();                        207         synchronize_rcu();
264         synchronize_rcu_expedited();              208         synchronize_rcu_expedited();
265 }                                                 209 }
266                                                   210 
267 #if !defined(CONFIG_TINY_RCU)                  !! 211 #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)
268                                                   212 
269 /*                                                213 /*
270  * Switch to run-time mode once RCU has fully     214  * Switch to run-time mode once RCU has fully initialized.
271  */                                               215  */
272 static int __init rcu_set_runtime_mode(void)      216 static int __init rcu_set_runtime_mode(void)
273 {                                                 217 {
274         rcu_test_sync_prims();                    218         rcu_test_sync_prims();
275         rcu_scheduler_active = RCU_SCHEDULER_R    219         rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
276         kfree_rcu_scheduler_running();            220         kfree_rcu_scheduler_running();
277         rcu_test_sync_prims();                    221         rcu_test_sync_prims();
278         return 0;                                 222         return 0;
279 }                                                 223 }
280 core_initcall(rcu_set_runtime_mode);              224 core_initcall(rcu_set_runtime_mode);
281                                                   225 
282 #endif /* #if !defined(CONFIG_TINY_RCU) */     !! 226 #endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */
283                                                   227 
284 #ifdef CONFIG_DEBUG_LOCK_ALLOC                    228 #ifdef CONFIG_DEBUG_LOCK_ALLOC
285 static struct lock_class_key rcu_lock_key;        229 static struct lock_class_key rcu_lock_key;
286 struct lockdep_map rcu_lock_map = {            !! 230 struct lockdep_map rcu_lock_map =
287         .name = "rcu_read_lock",               !! 231         STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
288         .key = &rcu_lock_key,                  << 
289         .wait_type_outer = LD_WAIT_FREE,       << 
290         .wait_type_inner = LD_WAIT_CONFIG, /*  << 
291 };                                             << 
292 EXPORT_SYMBOL_GPL(rcu_lock_map);                  232 EXPORT_SYMBOL_GPL(rcu_lock_map);
293                                                   233 
294 static struct lock_class_key rcu_bh_lock_key;     234 static struct lock_class_key rcu_bh_lock_key;
295 struct lockdep_map rcu_bh_lock_map = {         !! 235 struct lockdep_map rcu_bh_lock_map =
296         .name = "rcu_read_lock_bh",            !! 236         STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
297         .key = &rcu_bh_lock_key,               << 
298         .wait_type_outer = LD_WAIT_FREE,       << 
299         .wait_type_inner = LD_WAIT_CONFIG, /*  << 
300 };                                             << 
301 EXPORT_SYMBOL_GPL(rcu_bh_lock_map);               237 EXPORT_SYMBOL_GPL(rcu_bh_lock_map);
302                                                   238 
303 static struct lock_class_key rcu_sched_lock_ke    239 static struct lock_class_key rcu_sched_lock_key;
304 struct lockdep_map rcu_sched_lock_map = {      !! 240 struct lockdep_map rcu_sched_lock_map =
305         .name = "rcu_read_lock_sched",         !! 241         STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
306         .key = &rcu_sched_lock_key,            << 
307         .wait_type_outer = LD_WAIT_FREE,       << 
308         .wait_type_inner = LD_WAIT_SPIN,       << 
309 };                                             << 
310 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);            242 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
311                                                   243 
312 // Tell lockdep when RCU callbacks are being i << 
313 static struct lock_class_key rcu_callback_key;    244 static struct lock_class_key rcu_callback_key;
314 struct lockdep_map rcu_callback_map =             245 struct lockdep_map rcu_callback_map =
315         STATIC_LOCKDEP_MAP_INIT("rcu_callback"    246         STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
316 EXPORT_SYMBOL_GPL(rcu_callback_map);              247 EXPORT_SYMBOL_GPL(rcu_callback_map);
317                                                   248 
318 noinstr int notrace debug_lockdep_rcu_enabled( !! 249 int notrace debug_lockdep_rcu_enabled(void)
319 {                                                 250 {
320         return rcu_scheduler_active != RCU_SCH !! 251         return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
321                current->lockdep_recursion == 0    252                current->lockdep_recursion == 0;
322 }                                                 253 }
323 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);     254 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
                                                   >> 255 NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);
324                                                   256 
325 /**                                               257 /**
326  * rcu_read_lock_held() - might we be in RCU r    258  * rcu_read_lock_held() - might we be in RCU read-side critical section?
327  *                                                259  *
328  * If CONFIG_DEBUG_LOCK_ALLOC is selected, ret    260  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
329  * read-side critical section.  In absence of     261  * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
330  * this assumes we are in an RCU read-side cri    262  * this assumes we are in an RCU read-side critical section unless it can
331  * prove otherwise.  This is useful for debug     263  * prove otherwise.  This is useful for debug checks in functions that
332  * require that they be called within an RCU r    264  * require that they be called within an RCU read-side critical section.
333  *                                                265  *
334  * Checks debug_lockdep_rcu_enabled() to preve    266  * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
335  * and while lockdep is disabled.                 267  * and while lockdep is disabled.
336  *                                                268  *
337  * Note that rcu_read_lock() and the matching     269  * Note that rcu_read_lock() and the matching rcu_read_unlock() must
338  * occur in the same context, for example, it     270  * occur in the same context, for example, it is illegal to invoke
339  * rcu_read_unlock() in process context if the    271  * rcu_read_unlock() in process context if the matching rcu_read_lock()
340  * was invoked from within an irq handler.        272  * was invoked from within an irq handler.
341  *                                                273  *
342  * Note that rcu_read_lock() is disallowed if     274  * Note that rcu_read_lock() is disallowed if the CPU is either idle or
343  * offline from an RCU perspective, so check f    275  * offline from an RCU perspective, so check for those as well.
344  */                                               276  */
345 int rcu_read_lock_held(void)                      277 int rcu_read_lock_held(void)
346 {                                                 278 {
347         bool ret;                                 279         bool ret;
348                                                   280 
349         if (rcu_read_lock_held_common(&ret))      281         if (rcu_read_lock_held_common(&ret))
350                 return ret;                       282                 return ret;
351         return lock_is_held(&rcu_lock_map);       283         return lock_is_held(&rcu_lock_map);
352 }                                                 284 }
353 EXPORT_SYMBOL_GPL(rcu_read_lock_held);            285 EXPORT_SYMBOL_GPL(rcu_read_lock_held);
354                                                   286 
355 /**                                               287 /**
356  * rcu_read_lock_bh_held() - might we be in RC    288  * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
357  *                                                289  *
358  * Check for bottom half being disabled, which    290  * Check for bottom half being disabled, which covers both the
359  * CONFIG_PROVE_RCU and not cases.  Note that     291  * CONFIG_PROVE_RCU and not cases.  Note that if someone uses
360  * rcu_read_lock_bh(), but then later enables     292  * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
361  * will show the situation.  This is useful fo    293  * will show the situation.  This is useful for debug checks in functions
362  * that require that they be called within an     294  * that require that they be called within an RCU read-side critical
363  * section.                                       295  * section.
364  *                                                296  *
365  * Check debug_lockdep_rcu_enabled() to preven    297  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
366  *                                                298  *
367  * Note that rcu_read_lock_bh() is disallowed     299  * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
368  * offline from an RCU perspective, so check f    300  * offline from an RCU perspective, so check for those as well.
369  */                                               301  */
370 int rcu_read_lock_bh_held(void)                   302 int rcu_read_lock_bh_held(void)
371 {                                                 303 {
372         bool ret;                                 304         bool ret;
373                                                   305 
374         if (rcu_read_lock_held_common(&ret))      306         if (rcu_read_lock_held_common(&ret))
375                 return ret;                       307                 return ret;
376         return in_softirq() || irqs_disabled()    308         return in_softirq() || irqs_disabled();
377 }                                                 309 }
378 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);         310 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
379                                                   311 
380 int rcu_read_lock_any_held(void)                  312 int rcu_read_lock_any_held(void)
381 {                                                 313 {
382         bool ret;                                 314         bool ret;
383                                                   315 
384         if (rcu_read_lock_held_common(&ret))      316         if (rcu_read_lock_held_common(&ret))
385                 return ret;                       317                 return ret;
386         if (lock_is_held(&rcu_lock_map) ||        318         if (lock_is_held(&rcu_lock_map) ||
387             lock_is_held(&rcu_bh_lock_map) ||     319             lock_is_held(&rcu_bh_lock_map) ||
388             lock_is_held(&rcu_sched_lock_map))    320             lock_is_held(&rcu_sched_lock_map))
389                 return 1;                         321                 return 1;
390         return !preemptible();                    322         return !preemptible();
391 }                                                 323 }
392 EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);        324 EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);
393                                                   325 
394 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */       326 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
395                                                   327 
396 /**                                               328 /**
397  * wakeme_after_rcu() - Callback function to a    329  * wakeme_after_rcu() - Callback function to awaken a task after grace period
398  * @head: Pointer to rcu_head member within rc    330  * @head: Pointer to rcu_head member within rcu_synchronize structure
399  *                                                331  *
400  * Awaken the corresponding task now that a gr    332  * Awaken the corresponding task now that a grace period has elapsed.
401  */                                               333  */
402 void wakeme_after_rcu(struct rcu_head *head)      334 void wakeme_after_rcu(struct rcu_head *head)
403 {                                                 335 {
404         struct rcu_synchronize *rcu;              336         struct rcu_synchronize *rcu;
405                                                   337 
406         rcu = container_of(head, struct rcu_sy    338         rcu = container_of(head, struct rcu_synchronize, head);
407         complete(&rcu->completion);               339         complete(&rcu->completion);
408 }                                                 340 }
409 EXPORT_SYMBOL_GPL(wakeme_after_rcu);              341 EXPORT_SYMBOL_GPL(wakeme_after_rcu);
410                                                   342 
411 void __wait_rcu_gp(bool checktiny, unsigned in !! 343 void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
412                    struct rcu_synchronize *rs_    344                    struct rcu_synchronize *rs_array)
413 {                                                 345 {
414         int i;                                    346         int i;
415         int j;                                    347         int j;
416                                                   348 
417         /* Initialize and register callbacks f    349         /* Initialize and register callbacks for each crcu_array element. */
418         for (i = 0; i < n; i++) {                 350         for (i = 0; i < n; i++) {
419                 if (checktiny &&                  351                 if (checktiny &&
420                     (crcu_array[i] == call_rcu    352                     (crcu_array[i] == call_rcu)) {
421                         might_sleep();            353                         might_sleep();
422                         continue;                 354                         continue;
423                 }                                 355                 }
                                                   >> 356                 init_rcu_head_on_stack(&rs_array[i].head);
                                                   >> 357                 init_completion(&rs_array[i].completion);
424                 for (j = 0; j < i; j++)           358                 for (j = 0; j < i; j++)
425                         if (crcu_array[j] == c    359                         if (crcu_array[j] == crcu_array[i])
426                                 break;            360                                 break;
427                 if (j == i) {                  !! 361                 if (j == i)
428                         init_rcu_head_on_stack << 
429                         init_completion(&rs_ar << 
430                         (crcu_array[i])(&rs_ar    362                         (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
431                 }                              << 
432         }                                         363         }
433                                                   364 
434         /* Wait for all callbacks to be invoke    365         /* Wait for all callbacks to be invoked. */
435         for (i = 0; i < n; i++) {                 366         for (i = 0; i < n; i++) {
436                 if (checktiny &&                  367                 if (checktiny &&
437                     (crcu_array[i] == call_rcu    368                     (crcu_array[i] == call_rcu))
438                         continue;                 369                         continue;
439                 for (j = 0; j < i; j++)           370                 for (j = 0; j < i; j++)
440                         if (crcu_array[j] == c    371                         if (crcu_array[j] == crcu_array[i])
441                                 break;            372                                 break;
442                 if (j == i) {                  !! 373                 if (j == i)
443                         wait_for_completion_st !! 374                         wait_for_completion(&rs_array[i].completion);
444                         destroy_rcu_head_on_st !! 375                 destroy_rcu_head_on_stack(&rs_array[i].head);
445                 }                              << 
446         }                                         376         }
447 }                                                 377 }
448 EXPORT_SYMBOL_GPL(__wait_rcu_gp);                 378 EXPORT_SYMBOL_GPL(__wait_rcu_gp);
449                                                   379 
450 void finish_rcuwait(struct rcuwait *w)         << 
451 {                                              << 
452         rcu_assign_pointer(w->task, NULL);     << 
453         __set_current_state(TASK_RUNNING);     << 
454 }                                              << 
455 EXPORT_SYMBOL_GPL(finish_rcuwait);             << 
456                                                << 
457 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD              380 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
458 void init_rcu_head(struct rcu_head *head)         381 void init_rcu_head(struct rcu_head *head)
459 {                                                 382 {
460         debug_object_init(head, &rcuhead_debug    383         debug_object_init(head, &rcuhead_debug_descr);
461 }                                                 384 }
462 EXPORT_SYMBOL_GPL(init_rcu_head);                 385 EXPORT_SYMBOL_GPL(init_rcu_head);
463                                                   386 
464 void destroy_rcu_head(struct rcu_head *head)      387 void destroy_rcu_head(struct rcu_head *head)
465 {                                                 388 {
466         debug_object_free(head, &rcuhead_debug    389         debug_object_free(head, &rcuhead_debug_descr);
467 }                                                 390 }
468 EXPORT_SYMBOL_GPL(destroy_rcu_head);              391 EXPORT_SYMBOL_GPL(destroy_rcu_head);
469                                                   392 
470 static bool rcuhead_is_static_object(void *add    393 static bool rcuhead_is_static_object(void *addr)
471 {                                                 394 {
472         return true;                              395         return true;
473 }                                                 396 }
474                                                   397 
475 /**                                               398 /**
476  * init_rcu_head_on_stack() - initialize on-st    399  * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
477  * @head: pointer to rcu_head structure to be     400  * @head: pointer to rcu_head structure to be initialized
478  *                                                401  *
479  * This function informs debugobjects of a new    402  * This function informs debugobjects of a new rcu_head structure that
480  * has been allocated as an auto variable on t    403  * has been allocated as an auto variable on the stack.  This function
481  * is not required for rcu_head structures tha    404  * is not required for rcu_head structures that are statically defined or
482  * that are dynamically allocated on the heap.    405  * that are dynamically allocated on the heap.  This function has no
483  * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD k    406  * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
484  */                                               407  */
485 void init_rcu_head_on_stack(struct rcu_head *h    408 void init_rcu_head_on_stack(struct rcu_head *head)
486 {                                                 409 {
487         debug_object_init_on_stack(head, &rcuh    410         debug_object_init_on_stack(head, &rcuhead_debug_descr);
488 }                                                 411 }
489 EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);        412 EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);
490                                                   413 
491 /**                                               414 /**
492  * destroy_rcu_head_on_stack() - destroy on-st    415  * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
493  * @head: pointer to rcu_head structure to be     416  * @head: pointer to rcu_head structure to be initialized
494  *                                                417  *
495  * This function informs debugobjects that an     418  * This function informs debugobjects that an on-stack rcu_head structure
496  * is about to go out of scope.  As with init_    419  * is about to go out of scope.  As with init_rcu_head_on_stack(), this
497  * function is not required for rcu_head struc    420  * function is not required for rcu_head structures that are statically
498  * defined or that are dynamically allocated o    421  * defined or that are dynamically allocated on the heap.  Also as with
499  * init_rcu_head_on_stack(), this function has    422  * init_rcu_head_on_stack(), this function has no effect for
500  * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel build    423  * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
501  */                                               424  */
502 void destroy_rcu_head_on_stack(struct rcu_head    425 void destroy_rcu_head_on_stack(struct rcu_head *head)
503 {                                                 426 {
504         debug_object_free(head, &rcuhead_debug    427         debug_object_free(head, &rcuhead_debug_descr);
505 }                                                 428 }
506 EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);     429 EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
507                                                   430 
508 const struct debug_obj_descr rcuhead_debug_des !! 431 struct debug_obj_descr rcuhead_debug_descr = {
509         .name = "rcu_head",                       432         .name = "rcu_head",
510         .is_static_object = rcuhead_is_static_    433         .is_static_object = rcuhead_is_static_object,
511 };                                                434 };
512 EXPORT_SYMBOL_GPL(rcuhead_debug_descr);           435 EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
513 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD    436 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
514                                                   437 
515 #if defined(CONFIG_TREE_RCU) || defined(CONFIG    438 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_RCU_TRACE)
516 void do_trace_rcu_torture_read(const char *rcu    439 void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
517                                unsigned long s    440                                unsigned long secs,
518                                unsigned long c    441                                unsigned long c_old, unsigned long c)
519 {                                                 442 {
520         trace_rcu_torture_read(rcutorturename,    443         trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
521 }                                                 444 }
522 EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);     445 EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
523 #else                                             446 #else
524 #define do_trace_rcu_torture_read(rcutorturena    447 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
525         do { } while (0)                          448         do { } while (0)
526 #endif                                            449 #endif
527                                                   450 
528 #if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_ !! 451 #if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
529 /* Get rcutorture access to sched_setaffinity(    452 /* Get rcutorture access to sched_setaffinity(). */
530 long torture_sched_setaffinity(pid_t pid, cons !! 453 long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
531 {                                                 454 {
532         int ret;                                  455         int ret;
533                                                   456 
534         ret = sched_setaffinity(pid, in_mask);    457         ret = sched_setaffinity(pid, in_mask);
535         WARN_ONCE(ret, "%s: sched_setaffinity( !! 458         WARN_ONCE(ret, "%s: sched_setaffinity() returned %d\n", __func__, ret);
536         return ret;                               459         return ret;
537 }                                                 460 }
538 EXPORT_SYMBOL_GPL(torture_sched_setaffinity);  !! 461 EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
539 #endif                                            462 #endif
540                                                   463 
541 int rcu_cpu_stall_notifiers __read_mostly; //  << 
542 EXPORT_SYMBOL_GPL(rcu_cpu_stall_notifiers);    << 
543                                                << 
544 #ifdef CONFIG_RCU_STALL_COMMON                    464 #ifdef CONFIG_RCU_STALL_COMMON
545 int rcu_cpu_stall_ftrace_dump __read_mostly;      465 int rcu_cpu_stall_ftrace_dump __read_mostly;
546 module_param(rcu_cpu_stall_ftrace_dump, int, 0    466 module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
547 #ifdef CONFIG_RCU_CPU_STALL_NOTIFIER           !! 467 int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
548 module_param(rcu_cpu_stall_notifiers, int, 044 << 
549 #endif // #ifdef CONFIG_RCU_CPU_STALL_NOTIFIER << 
550 int rcu_cpu_stall_suppress __read_mostly; // ! << 
551 EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);        468 EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
552 module_param(rcu_cpu_stall_suppress, int, 0644    469 module_param(rcu_cpu_stall_suppress, int, 0644);
553 int rcu_cpu_stall_timeout __read_mostly = CONF    470 int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
554 module_param(rcu_cpu_stall_timeout, int, 0644)    471 module_param(rcu_cpu_stall_timeout, int, 0644);
555 int rcu_exp_cpu_stall_timeout __read_mostly =  << 
556 module_param(rcu_exp_cpu_stall_timeout, int, 0 << 
557 int rcu_cpu_stall_cputime __read_mostly = IS_E << 
558 module_param(rcu_cpu_stall_cputime, int, 0644) << 
559 bool rcu_exp_stall_task_details __read_mostly; << 
560 module_param(rcu_exp_stall_task_details, bool, << 
561 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */       472 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
562                                                   473 
563 // Suppress boot-time RCU CPU stall warnings a !! 474 #ifdef CONFIG_TASKS_RCU
564 // warnings.  Also used by rcutorture even if  << 
565 int rcu_cpu_stall_suppress_at_boot __read_most << 
566 EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress_at_bo << 
567 module_param(rcu_cpu_stall_suppress_at_boot, i << 
568                                                   475 
569 /**                                            !! 476 /*
570  * get_completed_synchronize_rcu - Return a pr !! 477  * Simple variant of RCU whose quiescent states are voluntary context
571  *                                             !! 478  * switch, cond_resched_rcu_qs(), user-space execution, and idle.
572  * Returns a value that will always be treated !! 479  * As such, grace periods can take one good long time.  There are no
573  * poll_state_synchronize_rcu() as a cookie wh !! 480  * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
574  * completed.                                  !! 481  * because this implementation is intended to get the system into a safe
                                                   >> 482  * state for some of the manipulations involved in tracing and the like.
                                                   >> 483  * Finally, this implementation does not support high call_rcu_tasks()
                                                   >> 484  * rates from multiple CPUs.  If this is required, per-CPU callback lists
                                                   >> 485  * will be needed.
                                                   >> 486  */
                                                   >> 487 
                                                   >> 488 /* Global list of callbacks and associated lock. */
                                                   >> 489 static struct rcu_head *rcu_tasks_cbs_head;
                                                   >> 490 static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
                                                   >> 491 static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
                                                   >> 492 static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
                                                   >> 493 
                                                   >> 494 /* Track exiting tasks in order to allow them to be waited for. */
                                                   >> 495 DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
                                                   >> 496 
                                                   >> 497 /* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
                                                   >> 498 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
                                                   >> 499 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
                                                   >> 500 module_param(rcu_task_stall_timeout, int, 0644);
                                                   >> 501 
                                                   >> 502 static struct task_struct *rcu_tasks_kthread_ptr;
                                                   >> 503 
                                                   >> 504 /**
                                                   >> 505  * call_rcu_tasks() - Queue an RCU for invocation task-based grace period
                                                   >> 506  * @rhp: structure to be used for queueing the RCU updates.
                                                   >> 507  * @func: actual callback function to be invoked after the grace period
                                                   >> 508  *
                                                   >> 509  * The callback function will be invoked some time after a full grace
                                                   >> 510  * period elapses, in other words after all currently executing RCU
                                                   >> 511  * read-side critical sections have completed. call_rcu_tasks() assumes
                                                   >> 512  * that the read-side critical sections end at a voluntary context
                                                   >> 513  * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
                                                   >> 514  * or transition to usermode execution.  As such, there are no read-side
                                                   >> 515  * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
                                                   >> 516  * this primitive is intended to determine that all tasks have passed
                                                   >> 517  * through a safe state, not so much for data-strcuture synchronization.
                                                   >> 518  *
                                                   >> 519  * See the description of call_rcu() for more detailed information on
                                                   >> 520  * memory ordering guarantees.
                                                   >> 521  */
                                                   >> 522 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
                                                   >> 523 {
                                                   >> 524         unsigned long flags;
                                                   >> 525         bool needwake;
                                                   >> 526 
                                                   >> 527         rhp->next = NULL;
                                                   >> 528         rhp->func = func;
                                                   >> 529         raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
                                                   >> 530         needwake = !rcu_tasks_cbs_head;
                                                   >> 531         *rcu_tasks_cbs_tail = rhp;
                                                   >> 532         rcu_tasks_cbs_tail = &rhp->next;
                                                   >> 533         raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
                                                   >> 534         /* We can't create the thread unless interrupts are enabled. */
                                                   >> 535         if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
                                                   >> 536                 wake_up(&rcu_tasks_cbs_wq);
                                                   >> 537 }
                                                   >> 538 EXPORT_SYMBOL_GPL(call_rcu_tasks);
                                                   >> 539 
                                                   >> 540 /**
                                                   >> 541  * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
                                                   >> 542  *
                                                   >> 543  * Control will return to the caller some time after a full rcu-tasks
                                                   >> 544  * grace period has elapsed, in other words after all currently
                                                   >> 545  * executing rcu-tasks read-side critical sections have completed.  These
                                                   >> 546  * read-side critical sections are delimited by calls to schedule(),
                                                   >> 547  * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
                                                   >> 548  * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
                                                   >> 549  *
                                                   >> 550  * This is a very specialized primitive, intended only for a few uses in
                                                   >> 551  * tracing and other situations requiring manipulation of function
                                                   >> 552  * preambles and profiling hooks.  The synchronize_rcu_tasks() function
                                                   >> 553  * is not (yet) intended for heavy use from multiple CPUs.
                                                   >> 554  *
                                                   >> 555  * Note that this guarantee implies further memory-ordering guarantees.
                                                   >> 556  * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
                                                   >> 557  * each CPU is guaranteed to have executed a full memory barrier since the
                                                   >> 558  * end of its last RCU-tasks read-side critical section whose beginning
                                                   >> 559  * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
                                                   >> 560  * having an RCU-tasks read-side critical section that extends beyond
                                                   >> 561  * the return from synchronize_rcu_tasks() is guaranteed to have executed
                                                   >> 562  * a full memory barrier after the beginning of synchronize_rcu_tasks()
                                                   >> 563  * and before the beginning of that RCU-tasks read-side critical section.
                                                   >> 564  * Note that these guarantees include CPUs that are offline, idle, or
                                                   >> 565  * executing in user mode, as well as CPUs that are executing in the kernel.
                                                   >> 566  *
                                                   >> 567  * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
                                                   >> 568  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
                                                   >> 569  * to have executed a full memory barrier during the execution of
                                                   >> 570  * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
                                                   >> 571  * (but again only if the system has more than one CPU).
                                                   >> 572  */
                                                   >> 573 void synchronize_rcu_tasks(void)
                                                   >> 574 {
                                                   >> 575         /* Complain if the scheduler has not started.  */
                                                   >> 576         RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
                                                   >> 577                          "synchronize_rcu_tasks called too soon");
                                                   >> 578 
                                                   >> 579         /* Wait for the grace period. */
                                                   >> 580         wait_rcu_gp(call_rcu_tasks);
                                                   >> 581 }
                                                   >> 582 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
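/*
 * Editor's sketch (not part of update.c): the classic synchronize_rcu_tasks()
 * use case is function tracing, which must not free trampoline text while a
 * preempted task might still be executing in it.  my_unpatch_all_sites() and
 * struct my_trampoline are hypothetical (see the sketch above).
 */
static void my_trampoline_unregister(struct my_trampoline *tp)
{
	/* 1. Re-point every patched call site away from tp->insns. */
	my_unpatch_all_sites(tp);

	/*
	 * 2. Wait for each task to pass through a voluntary context switch,
	 *    idle, or usermode, so none can still be running in the old text.
	 */
	synchronize_rcu_tasks();

	/* 3. Only now is it safe to free the trampoline synchronously. */
	kfree(tp->insns);
	kfree(tp);
}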
                                                   >> 583 
                                                   >> 584 /**
                                                   >> 585  * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
                                                   >> 586  *
                                                   >> 587  * Although the current implementation is guaranteed to wait, it is not
                                                   >> 588  * obligated to do so, for example, if there are no pending callbacks.
                                                   >> 589  */
                                                   >> 590 void rcu_barrier_tasks(void)
                                                   >> 591 {
                                                   >> 592         /* There is only one callback queue, so this is easy.  ;-) */
                                                   >> 593         synchronize_rcu_tasks();
                                                   >> 594 }
                                                   >> 595 EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
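/*
 * Editor's sketch (not part of update.c): a module whose callbacks were
 * queued with call_rcu_tasks() must wait for them to be invoked before its
 * text can be unmapped; rcu_barrier_tasks() provides exactly that wait.
 * The function names below are hypothetical.
 */
static void __exit my_module_exit(void)
{
	my_stop_queueing_callbacks();	/* No new call_rcu_tasks() after this point. */
	rcu_barrier_tasks();		/* Previously queued callbacks have now run. */
	/* Module-private data may now be freed and the module text unloaded. */
}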
                                                   >> 596 
                                                   >> 597 /* See if tasks are still holding out, complain if so. */
                                                   >> 598 static void check_holdout_task(struct task_struct *t,
                                                   >> 599                                bool needreport, bool *firstreport)
                                                   >> 600 {
                                                   >> 601         int cpu;
                                                   >> 602 
                                                   >> 603         if (!READ_ONCE(t->rcu_tasks_holdout) ||
                                                   >> 604             t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
                                                   >> 605             !READ_ONCE(t->on_rq) ||
                                                   >> 606             (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
                                                   >> 607              !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
                                                   >> 608                 WRITE_ONCE(t->rcu_tasks_holdout, false);
                                                   >> 609                 list_del_init(&t->rcu_tasks_holdout_list);
                                                   >> 610                 put_task_struct(t);
                                                   >> 611                 return;
                                                   >> 612         }
                                                   >> 613         rcu_request_urgent_qs_task(t);
                                                   >> 614         if (!needreport)
                                                   >> 615                 return;
                                                   >> 616         if (*firstreport) {
                                                   >> 617                 pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
                                                   >> 618                 *firstreport = false;
                                                   >> 619         }
                                                   >> 620         cpu = task_cpu(t);
                                                   >> 621         pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
                                                   >> 622                  t, ".I"[is_idle_task(t)],
                                                   >> 623                  "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
                                                   >> 624                  t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
                                                   >> 625                  t->rcu_tasks_idle_cpu, cpu);
                                                   >> 626         sched_show_task(t);
                                                   >> 627 }
                                                   >> 628 
                                                   >> 629 /* RCU-tasks kthread that detects grace periods and invokes callbacks. */
                                                   >> 630 static int __noreturn rcu_tasks_kthread(void *arg)
                                                   >> 631 {
                                                   >> 632         unsigned long flags;
                                                   >> 633         struct task_struct *g, *t;
                                                   >> 634         unsigned long lastreport;
                                                   >> 635         struct rcu_head *list;
                                                   >> 636         struct rcu_head *next;
                                                   >> 637         LIST_HEAD(rcu_tasks_holdouts);
                                                   >> 638         int fract;
                                                   >> 639 
                                                   >> 640         /* Run on housekeeping CPUs by default.  The sysadmin can move it if desired. */
                                                   >> 641         housekeeping_affine(current, HK_FLAG_RCU);
                                                   >> 642 
                                                   >> 643         /*
                                                   >> 644          * Each pass through the following loop makes one check for
                                                   >> 645          * newly arrived callbacks, and, if there are some, waits for
                                                   >> 646          * one RCU-tasks grace period and then invokes the callbacks.
                                                   >> 647          * This loop is terminated by the system going down.  ;-)
                                                   >> 648          */
                                                   >> 649         for (;;) {
                                                   >> 650 
                                                   >> 651                 /* Pick up any new callbacks. */
                                                   >> 652                 raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
                                                   >> 653                 list = rcu_tasks_cbs_head;
                                                   >> 654                 rcu_tasks_cbs_head = NULL;
                                                   >> 655                 rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
                                                   >> 656                 raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
                                                   >> 657 
                                                   >> 658                 /* If there were none, wait a bit and start over. */
                                                   >> 659                 if (!list) {
                                                   >> 660                         wait_event_interruptible(rcu_tasks_cbs_wq,
                                                   >> 661                                                  rcu_tasks_cbs_head);
                                                   >> 662                         if (!rcu_tasks_cbs_head) {
                                                   >> 663                                 WARN_ON(signal_pending(current));
                                                   >> 664                                 schedule_timeout_interruptible(HZ/10);
                                                   >> 665                         }
                                                   >> 666                         continue;
                                                   >> 667                 }
                                                   >> 668 
                                                   >> 669                 /*
                                                   >> 670                  * Wait for all pre-existing t->on_rq and t->nvcsw
                                                   >> 671                  * transitions to complete.  Invoking synchronize_rcu()
                                                   >> 672                  * suffices because all these transitions occur with
                                                   >> 673                  * interrupts disabled.  Without this synchronize_rcu(),
                                                   >> 674                  * a read-side critical section that started before the
                                                   >> 675                  * grace period might be incorrectly seen as having started
                                                   >> 676                  * after the grace period.
                                                   >> 677                  *
                                                   >> 678                  * This synchronize_rcu() also dispenses with the
                                                   >> 679                  * need for a memory barrier on the first store to
                                                   >> 680                  * ->rcu_tasks_holdout, as it forces the store to happen
                                                   >> 681                  * after the beginning of the grace period.
                                                   >> 682                  */
                                                   >> 683                 synchronize_rcu();
                                                   >> 684 
                                                   >> 685                 /*
                                                   >> 686                  * There were callbacks, so we need to wait for an
                                                   >> 687                  * RCU-tasks grace period.  Start off by scanning
                                                   >> 688                  * the task list for tasks that are not already
                                                   >> 689                  * voluntarily blocked.  Mark these tasks and make
                                                   >> 690                  * a list of them in rcu_tasks_holdouts.
                                                   >> 691                  */
                                                   >> 692                 rcu_read_lock();
                                                   >> 693                 for_each_process_thread(g, t) {
                                                   >> 694                         if (t != current && READ_ONCE(t->on_rq) &&
                                                   >> 695                             !is_idle_task(t)) {
                                                   >> 696                                 get_task_struct(t);
                                                   >> 697                                 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
                                                   >> 698                                 WRITE_ONCE(t->rcu_tasks_holdout, true);
                                                   >> 699                                 list_add(&t->rcu_tasks_holdout_list,
                                                   >> 700                                          &rcu_tasks_holdouts);
                                                   >> 701                         }
                                                   >> 702                 }
                                                   >> 703                 rcu_read_unlock();
                                                   >> 704 
                                                   >> 705                 /*
                                                   >> 706                  * Wait for tasks that are in the process of exiting.
                                                   >> 707                  * This does only part of the job, ensuring that all
                                                   >> 708                  * tasks that were previously exiting reach the point
                                                   >> 709                  * where they have disabled preemption, allowing the
                                                   >> 710                  * later synchronize_rcu() to finish the job.
                                                   >> 711                  */
                                                   >> 712                 synchronize_srcu(&tasks_rcu_exit_srcu);
                                                   >> 713 
                                                   >> 714                 /*
                                                   >> 715                  * Each pass through the following loop scans the list
                                                   >> 716                  * of holdout tasks, removing any that are no longer
                                                   >> 717                  * holdouts.  When the list is empty, we are done.
                                                   >> 718                  */
                                                   >> 719                 lastreport = jiffies;
                                                   >> 720 
                                                   >> 721                 /* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */
                                                   >> 722                 fract = 10;
                                                   >> 723 
                                                   >> 724                 for (;;) {
                                                   >> 725                         bool firstreport;
                                                   >> 726                         bool needreport;
                                                   >> 727                         int rtst;
                                                   >> 728                         struct task_struct *t1;
                                                   >> 729 
                                                   >> 730                         if (list_empty(&rcu_tasks_holdouts))
                                                   >> 731                                 break;
                                                   >> 732 
                                                   >> 733                         /* Slowly back off waiting for holdouts */
                                                   >> 734                         schedule_timeout_interruptible(HZ/fract);
                                                   >> 735 
                                                   >> 736                         if (fract > 1)
                                                   >> 737                                 fract--;
                                                   >> 738 
                                                   >> 739                         rtst = READ_ONCE(rcu_task_stall_timeout);
                                                   >> 740                         needreport = rtst > 0 &&
                                                   >> 741                                      time_after(jiffies, lastreport + rtst);
                                                   >> 742                         if (needreport)
                                                   >> 743                                 lastreport = jiffies;
                                                   >> 744                         firstreport = true;
                                                   >> 745                         WARN_ON(signal_pending(current));
                                                   >> 746                         list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
                                                   >> 747                                                 rcu_tasks_holdout_list) {
                                                   >> 748                                 check_holdout_task(t, needreport, &firstreport);
                                                   >> 749                                 cond_resched();
                                                   >> 750                         }
                                                   >> 751                 }
                                                   >> 752 
                                                   >> 753                 /*
                                                   >> 754                  * Because ->on_rq and ->nvcsw are not guaranteed
                                                   >> 755                  * to have full memory barriers prior to them in the
                                                   >> 756                  * schedule() path, memory reordering on other CPUs could
                                                   >> 757                  * cause their RCU-tasks read-side critical sections to
                                                   >> 758                  * extend past the end of the grace period.  However,
                                                   >> 759                  * because these ->nvcsw updates are carried out with
                                                   >> 760                  * interrupts disabled, we can use synchronize_rcu()
                                                   >> 761                  * to force the needed ordering on all such CPUs.
                                                   >> 762                  *
                                                   >> 763                  * This synchronize_rcu() also confines all
                                                   >> 764                  * ->rcu_tasks_holdout accesses to be within the grace
                                                   >> 765                  * period, avoiding the need for memory barriers for
                                                   >> 766                  * ->rcu_tasks_holdout accesses.
                                                   >> 767                  *
                                                   >> 768                  * In addition, this synchronize_rcu() waits for exiting
                                                   >> 769                  * tasks to complete their final preempt_disable() region
                                                   >> 770                  * of execution, cleaning up after the synchronize_srcu()
                                                   >> 771                  * above.
                                                   >> 772                  */
                                                   >> 773                 synchronize_rcu();
                                                   >> 774 
                                                   >> 775                 /* Invoke the callbacks. */
                                                   >> 776                 while (list) {
                                                   >> 777                         next = list->next;
                                                   >> 778                         local_bh_disable();
                                                   >> 779                         list->func(list);
                                                   >> 780                         local_bh_enable();
                                                   >> 781                         list = next;
                                                   >> 782                         cond_resched();
                                                   >> 783                 }
                                                   >> 784                 /* Paranoid sleep to keep this from entering a tight loop */
                                                   >> 785                 schedule_timeout_uninterruptible(HZ/10);
                                                   >> 786         }
                                                   >> 787 }
                                                   >> 788 
                                                   >> 789 /* Spawn rcu_tasks_kthread() at core_initcall() time. */
                                                   >> 790 static int __init rcu_spawn_tasks_kthread(void)
                                                   >> 791 {
                                                   >> 792         struct task_struct *t;
                                                   >> 793 
                                                   >> 794         t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
                                                   >> 795         if (WARN_ONCE(IS_ERR(t), "%s: Could not start Tasks-RCU grace-period kthread, OOM is now expected behavior\n", __func__))
                                                   >> 796                 return 0;
                                                   >> 797         smp_mb(); /* Ensure others see full kthread. */
                                                   >> 798         WRITE_ONCE(rcu_tasks_kthread_ptr, t);
                                                   >> 799         return 0;
                                                   >> 800 }
                                                   >> 801 core_initcall(rcu_spawn_tasks_kthread);
                                                   >> 802 
                                                   >> 803 /* Do the srcu_read_lock() for the above synchronize_srcu().  */
                                                   >> 804 void exit_tasks_rcu_start(void)
                                                   >> 805 {
                                                   >> 806         preempt_disable();
                                                   >> 807         current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
                                                   >> 808         preempt_enable();
                                                   >> 809 }
                                                   >> 810 
                                                   >> 811 /* Do the srcu_read_unlock() for the above synchronize_srcu().  */
                                                   >> 812 void exit_tasks_rcu_finish(void)
                                                   >> 813 {
                                                   >> 814         preempt_disable();
                                                   >> 815         __srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
                                                   >> 816         preempt_enable();
                                                   >> 817 }
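/*
 * Editor's note (not part of update.c): the two hooks above are invoked from
 * the task-exit path in kernel/exit.c, bracketing the late portion of task
 * teardown so that rcu_tasks_kthread()'s synchronize_srcu(&tasks_rcu_exit_srcu)
 * waits for tasks that are partway through exiting.  Conceptually:
 *
 *	exit_tasks_rcu_start();
 *	... teardown that Tasks RCU must still wait for ...
 *	exit_tasks_rcu_finish();
 */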
                                                   >> 818 
                                                   >> 819 #endif /* #ifdef CONFIG_TASKS_RCU */
                                                   >> 820 
                                                   >> 821 #ifndef CONFIG_TINY_RCU
                                                   >> 822 
                                                   >> 823 /*
                                                   >> 824  * Print any non-default Tasks RCU settings.
575  */                                               825  */
576 unsigned long get_completed_synchronize_rcu(vo !! 826 static void __init rcu_tasks_bootup_oddness(void)
577 {                                                 827 {
578         return RCU_GET_STATE_COMPLETED;        !! 828 #ifdef CONFIG_TASKS_RCU
                                                   >> 829         if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
                                                   >> 830                 pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
                                                   >> 831         else
                                                   >> 832                 pr_info("\tTasks RCU enabled.\n");
                                                   >> 833 #endif /* #ifdef CONFIG_TASKS_RCU */
579 }                                                 834 }
580 EXPORT_SYMBOL_GPL(get_completed_synchronize_rc !! 835 
                                                   >> 836 #endif /* #ifndef CONFIG_TINY_RCU */
581                                                   837 
582 #ifdef CONFIG_PROVE_RCU                           838 #ifdef CONFIG_PROVE_RCU
583                                                   839 
584 /*                                                840 /*
585  * Early boot self test parameters.               841  * Early boot self test parameters.
586  */                                               842  */
587 static bool rcu_self_test;                        843 static bool rcu_self_test;
588 module_param(rcu_self_test, bool, 0444);          844 module_param(rcu_self_test, bool, 0444);
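/*
 * Editor's note (not part of update.c): because this code is built in, the
 * parameter above is normally set on the kernel command line, which (assuming
 * this file's usual "rcupdate." module-parameter prefix) would look like:
 *
 *	rcupdate.rcu_self_test=1
 *
 * causing rcu_early_boot_tests() below to queue the test callbacks.
 */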
589                                                   845 
590 static int rcu_self_test_counter;                 846 static int rcu_self_test_counter;
591                                                   847 
592 static void test_callback(struct rcu_head *r)     848 static void test_callback(struct rcu_head *r)
593 {                                                 849 {
594         rcu_self_test_counter++;                  850         rcu_self_test_counter++;
595         pr_info("RCU test callback executed %d    851         pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
596 }                                                 852 }
597                                                   853 
598 DEFINE_STATIC_SRCU(early_srcu);                   854 DEFINE_STATIC_SRCU(early_srcu);
599 static unsigned long early_srcu_cookie;        << 
600                                                   855 
601 struct early_boot_kfree_rcu {                     856 struct early_boot_kfree_rcu {
602         struct rcu_head rh;                       857         struct rcu_head rh;
603 };                                                858 };
604                                                   859 
605 static void early_boot_test_call_rcu(void)        860 static void early_boot_test_call_rcu(void)
606 {                                                 861 {
607         static struct rcu_head head;              862         static struct rcu_head head;
608         int idx;                               << 
609         static struct rcu_head shead;             863         static struct rcu_head shead;
610         struct early_boot_kfree_rcu *rhp;         864         struct early_boot_kfree_rcu *rhp;
611                                                   865 
612         idx = srcu_down_read(&early_srcu);     << 
613         srcu_up_read(&early_srcu, idx);        << 
614         call_rcu(&head, test_callback);           866         call_rcu(&head, test_callback);
615         early_srcu_cookie = start_poll_synchro !! 867         if (IS_ENABLED(CONFIG_SRCU))
616         call_srcu(&early_srcu, &shead, test_ca !! 868                 call_srcu(&early_srcu, &shead, test_callback);
617         rhp = kmalloc(sizeof(*rhp), GFP_KERNEL    869         rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
618         if (!WARN_ON_ONCE(!rhp))                  870         if (!WARN_ON_ONCE(!rhp))
619                 kfree_rcu(rhp, rh);               871                 kfree_rcu(rhp, rh);
620 }                                                 872 }
621                                                   873 
622 void rcu_early_boot_tests(void)                   874 void rcu_early_boot_tests(void)
623 {                                                 875 {
624         pr_info("Running RCU self tests\n");      876         pr_info("Running RCU self tests\n");
625                                                   877 
626         if (rcu_self_test)                        878         if (rcu_self_test)
627                 early_boot_test_call_rcu();       879                 early_boot_test_call_rcu();
628         rcu_test_sync_prims();                    880         rcu_test_sync_prims();
629 }                                                 881 }
630                                                   882 
631 static int rcu_verify_early_boot_tests(void)      883 static int rcu_verify_early_boot_tests(void)
632 {                                                 884 {
633         int ret = 0;                              885         int ret = 0;
634         int early_boot_test_counter = 0;          886         int early_boot_test_counter = 0;
635                                                   887 
636         if (rcu_self_test) {                      888         if (rcu_self_test) {
637                 early_boot_test_counter++;        889                 early_boot_test_counter++;
638                 rcu_barrier();                    890                 rcu_barrier();
639                 early_boot_test_counter++;     !! 891                 if (IS_ENABLED(CONFIG_SRCU)) {
640                 srcu_barrier(&early_srcu);     !! 892                         early_boot_test_counter++;
641                 WARN_ON_ONCE(!poll_state_synch !! 893                         srcu_barrier(&early_srcu);
642                 cleanup_srcu_struct(&early_src !! 894                 }
643         }                                         895         }
644         if (rcu_self_test_counter != early_boo    896         if (rcu_self_test_counter != early_boot_test_counter) {
645                 WARN_ON(1);                       897                 WARN_ON(1);
646                 ret = -1;                         898                 ret = -1;
647         }                                         899         }
648                                                   900 
649         return ret;                               901         return ret;
650 }                                                 902 }
651 late_initcall(rcu_verify_early_boot_tests);       903 late_initcall(rcu_verify_early_boot_tests);
652 #else                                             904 #else
653 void rcu_early_boot_tests(void) {}                905 void rcu_early_boot_tests(void) {}
654 #endif /* CONFIG_PROVE_RCU */                     906 #endif /* CONFIG_PROVE_RCU */
655                                                << 
656 #include "tasks.h"                             << 
657                                                   907 
658 #ifndef CONFIG_TINY_RCU                           908 #ifndef CONFIG_TINY_RCU
659                                                   909 
660 /*                                                910 /*
661  * Print any significant non-default boot-time    911  * Print any significant non-default boot-time settings.
662  */                                               912  */
663 void __init rcupdate_announce_bootup_oddness(v    913 void __init rcupdate_announce_bootup_oddness(void)
664 {                                                 914 {
665         if (rcu_normal)                           915         if (rcu_normal)
666                 pr_info("\tNo expedited grace     916                 pr_info("\tNo expedited grace period (rcu_normal).\n");
667         else if (rcu_normal_after_boot)           917         else if (rcu_normal_after_boot)
668                 pr_info("\tNo expedited grace     918                 pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
669         else if (rcu_expedited)                   919         else if (rcu_expedited)
670                 pr_info("\tAll grace periods a    920                 pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
671         if (rcu_cpu_stall_suppress)               921         if (rcu_cpu_stall_suppress)
672                 pr_info("\tRCU CPU stall warni    922                 pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
673         if (rcu_cpu_stall_timeout != CONFIG_RC    923         if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
674                 pr_info("\tRCU CPU stall warni    924                 pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
675         rcu_tasks_bootup_oddness();               925         rcu_tasks_bootup_oddness();
676 }                                                 926 }
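/*
 * Editor's note (not part of update.c): the settings reported above
 * correspond to boot-time parameters, so a command line such as
 *
 *	rcupdate.rcu_expedited=1 rcupdate.rcu_cpu_stall_timeout=60
 *
 * would trigger the "expedited" and stall-timeout messages; the writable
 * ones can also be changed at run time under /sys/module/rcupdate/parameters/.
 */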
677                                                   927 
678 #endif /* #ifndef CONFIG_TINY_RCU */              928 #endif /* #ifndef CONFIG_TINY_RCU */
679                                                   929 
