
TOMOYO Linux Cross Reference
Linux/kernel/rcu/update.c

Diff markup

Differences between /kernel/rcu/update.c (Version linux-6.12-rc7) and /kernel/rcu/update.c (Version linux-4.18.20)


  1 // SPDX-License-Identifier: GPL-2.0+           << 
  2 /*                                                  1 /*
  3  * Read-Copy Update mechanism for mutual exclu      2  * Read-Copy Update mechanism for mutual exclusion
  4  *                                                  3  *
                                                   >>   4  * This program is free software; you can redistribute it and/or modify
                                                   >>   5  * it under the terms of the GNU General Public License as published by
                                                   >>   6  * the Free Software Foundation; either version 2 of the License, or
                                                   >>   7  * (at your option) any later version.
                                                   >>   8  *
                                                   >>   9  * This program is distributed in the hope that it will be useful,
                                                   >>  10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
                                                   >>  11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
                                                   >>  12  * GNU General Public License for more details.
                                                   >>  13  *
                                                   >>  14  * You should have received a copy of the GNU General Public License
                                                   >>  15  * along with this program; if not, you can access it online at
                                                   >>  16  * http://www.gnu.org/licenses/gpl-2.0.html.
                                                   >>  17  *
  5  * Copyright IBM Corporation, 2001                 18  * Copyright IBM Corporation, 2001
  6  *                                                 19  *
  7  * Authors: Dipankar Sarma <dipankar@in.ibm.co     20  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
  8  *          Manfred Spraul <manfred@colorfulli     21  *          Manfred Spraul <manfred@colorfullife.com>
  9  *                                                 22  *
 10  * Based on the original work by Paul McKenney !!  23  * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 11  * and inputs from Rusty Russell, Andrea Arcan     24  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 12  * Papers:                                         25  * Papers:
 13  * http://www.rdrop.com/users/paulmck/paper/rc     26  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 14  * http://lse.sourceforge.net/locking/rclock_O     27  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 15  *                                                 28  *
 16  * For detailed explanation of Read-Copy Updat     29  * For detailed explanation of Read-Copy Update mechanism see -
 17  *              http://lse.sourceforge.net/loc     30  *              http://lse.sourceforge.net/locking/rcupdate.html
 18  *                                                 31  *
 19  */                                                32  */
 20 #include <linux/types.h>                           33 #include <linux/types.h>
 21 #include <linux/kernel.h>                          34 #include <linux/kernel.h>
 22 #include <linux/init.h>                            35 #include <linux/init.h>
 23 #include <linux/spinlock.h>                        36 #include <linux/spinlock.h>
 24 #include <linux/smp.h>                             37 #include <linux/smp.h>
 25 #include <linux/interrupt.h>                       38 #include <linux/interrupt.h>
 26 #include <linux/sched/signal.h>                    39 #include <linux/sched/signal.h>
 27 #include <linux/sched/debug.h>                     40 #include <linux/sched/debug.h>
 28 #include <linux/torture.h>                     << 
 29 #include <linux/atomic.h>                          41 #include <linux/atomic.h>
 30 #include <linux/bitops.h>                          42 #include <linux/bitops.h>
 31 #include <linux/percpu.h>                          43 #include <linux/percpu.h>
 32 #include <linux/notifier.h>                        44 #include <linux/notifier.h>
 33 #include <linux/cpu.h>                             45 #include <linux/cpu.h>
 34 #include <linux/mutex.h>                           46 #include <linux/mutex.h>
 35 #include <linux/export.h>                          47 #include <linux/export.h>
 36 #include <linux/hardirq.h>                         48 #include <linux/hardirq.h>
 37 #include <linux/delay.h>                           49 #include <linux/delay.h>
 38 #include <linux/moduleparam.h>                     50 #include <linux/moduleparam.h>
 39 #include <linux/kthread.h>                         51 #include <linux/kthread.h>
 40 #include <linux/tick.h>                            52 #include <linux/tick.h>
 41 #include <linux/rcupdate_wait.h>                   53 #include <linux/rcupdate_wait.h>
 42 #include <linux/sched/isolation.h>                 54 #include <linux/sched/isolation.h>
 43 #include <linux/kprobes.h>                     << 
 44 #include <linux/slab.h>                        << 
 45 #include <linux/irq_work.h>                    << 
 46 #include <linux/rcupdate_trace.h>              << 
 47                                                    55 
 48 #define CREATE_TRACE_POINTS                        56 #define CREATE_TRACE_POINTS
 49                                                    57 
 50 #include "rcu.h"                                   58 #include "rcu.h"
 51                                                    59 
 52 #ifdef MODULE_PARAM_PREFIX                         60 #ifdef MODULE_PARAM_PREFIX
 53 #undef MODULE_PARAM_PREFIX                         61 #undef MODULE_PARAM_PREFIX
 54 #endif                                             62 #endif
 55 #define MODULE_PARAM_PREFIX "rcupdate."            63 #define MODULE_PARAM_PREFIX "rcupdate."
 56                                                    64 
 57 #ifndef CONFIG_TINY_RCU                            65 #ifndef CONFIG_TINY_RCU
 58 module_param(rcu_expedited, int, 0444);        !!  66 extern int rcu_expedited; /* from sysctl */
 59 module_param(rcu_normal, int, 0444);           !!  67 module_param(rcu_expedited, int, 0);
 60 static int rcu_normal_after_boot = IS_ENABLED( !!  68 extern int rcu_normal; /* from sysctl */
 61 #if !defined(CONFIG_PREEMPT_RT) || defined(CON !!  69 module_param(rcu_normal, int, 0);
 62 module_param(rcu_normal_after_boot, int, 0444) !!  70 static int rcu_normal_after_boot;
 63 #endif                                         !!  71 module_param(rcu_normal_after_boot, int, 0);
 64 #endif /* #ifndef CONFIG_TINY_RCU */               72 #endif /* #ifndef CONFIG_TINY_RCU */
 65                                                    73 
 66 #ifdef CONFIG_DEBUG_LOCK_ALLOC                     74 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 67 /**                                                75 /**
 68  * rcu_read_lock_held_common() - might we be i !!  76  * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 69  * @ret:        Best guess answer if lockdep c << 
 70  *                                             << 
 71  * Returns true if lockdep must be ignored, in << 
 72  * the best guess described below.  Otherwise  << 
 73  * case ``*ret`` tells the caller nothing and  << 
 74  * consult lockdep.                            << 
 75  *                                                 77  *
 76  * If CONFIG_DEBUG_LOCK_ALLOC is selected, set !!  78  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 77  * RCU-sched read-side critical section.  In a     79  * RCU-sched read-side critical section.  In absence of
 78  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we ar     80  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 79  * critical section unless it can prove otherw     81  * critical section unless it can prove otherwise.  Note that disabling
 80  * of preemption (including disabling irqs) co     82  * of preemption (including disabling irqs) counts as an RCU-sched
 81  * read-side critical section.  This is useful     83  * read-side critical section.  This is useful for debug checks in functions
 82  * that required that they be called within an     84  * that required that they be called within an RCU-sched read-side
 83  * critical section.                               85  * critical section.
 84  *                                                 86  *
 85  * Check debug_lockdep_rcu_enabled() to preven     87  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 86  * and while lockdep is disabled.                  88  * and while lockdep is disabled.
 87  *                                                 89  *
 88  * Note that if the CPU is in the idle loop fr !!  90  * Note that if the CPU is in the idle loop from an RCU point of
 89  * that we are in the section between ct_idle_ !!  91  * view (ie: that we are in the section between rcu_idle_enter() and
 90  * then rcu_read_lock_held() sets ``*ret`` to  !!  92  * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 91  * rcu_read_lock().  The reason for this is th !!  93  * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 92  * in such a section, considering these as in  !!  94  * that are in such a section, considering these as in extended quiescent
 93  * so such a CPU is effectively never in an RC !!  95  * state, so such a CPU is effectively never in an RCU read-side critical
 94  * regardless of what RCU primitives it invoke !!  96  * section regardless of what RCU primitives it invokes.  This state of
 95  * required --- we need to keep an RCU-free wi !!  97  * affairs is required --- we need to keep an RCU-free window in idle
 96  * possibly enter into low power mode. This wa !!  98  * where the CPU may possibly enter into low power mode. This way we can
 97  * quiescent state to other CPUs that started  !!  99  * notice an extended quiescent state to other CPUs that started a grace
 98  * we would delay any grace period as long as  !! 100  * period. Otherwise we would delay any grace period as long as we run in
                                                   >> 101  * the idle task.
 99  *                                                102  *
100  * Similarly, we avoid claiming an RCU read lo !! 103  * Similarly, we avoid claiming an SRCU read lock held if the current
101  * CPU is offline.                                104  * CPU is offline.
102  */                                               105  */
103 static bool rcu_read_lock_held_common(bool *re << 
104 {                                              << 
105         if (!debug_lockdep_rcu_enabled()) {    << 
106                 *ret = true;                   << 
107                 return true;                   << 
108         }                                      << 
109         if (!rcu_is_watching()) {              << 
110                 *ret = false;                  << 
111                 return true;                   << 
112         }                                      << 
113         if (!rcu_lockdep_current_cpu_online()) << 
114                 *ret = false;                  << 
115                 return true;                   << 
116         }                                      << 
117         return false;                          << 
118 }                                              << 
119                                                << 
120 int rcu_read_lock_sched_held(void)                106 int rcu_read_lock_sched_held(void)
121 {                                                 107 {
122         bool ret;                              !! 108         int lockdep_opinion = 0;
123                                                   109 
124         if (rcu_read_lock_held_common(&ret))   !! 110         if (!debug_lockdep_rcu_enabled())
125                 return ret;                    !! 111                 return 1;
126         return lock_is_held(&rcu_sched_lock_ma !! 112         if (!rcu_is_watching())
                                                   >> 113                 return 0;
                                                   >> 114         if (!rcu_lockdep_current_cpu_online())
                                                   >> 115                 return 0;
                                                   >> 116         if (debug_locks)
                                                   >> 117                 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
                                                   >> 118         return lockdep_opinion || !preemptible();
127 }                                                 119 }
128 EXPORT_SYMBOL(rcu_read_lock_sched_held);          120 EXPORT_SYMBOL(rcu_read_lock_sched_held);
129 #endif                                            121 #endif
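For illustration only (not code from update.c), a minimal sketch of the usual consumer of rcu_read_lock_sched_held(): a debug assertion in an accessor that must run with preemption disabled or inside an RCU-sched read-side critical section. The my_data type and my_data_ptr pointer are hypothetical.

/* Hypothetical accessor: caller must be non-preemptible or be inside
 * an RCU-sched read-side critical section.
 */
static struct my_data *my_data_peek(void)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "my_data_peek() needs rcu_read_lock_sched() protection");
	return rcu_dereference_sched(my_data_ptr);
}
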
130                                                   122 
131 #ifndef CONFIG_TINY_RCU                           123 #ifndef CONFIG_TINY_RCU
132                                                   124 
133 /*                                                125 /*
134  * Should expedited grace-period primitives al    126  * Should expedited grace-period primitives always fall back to their
135  * non-expedited counterparts?  Intended for u    127  * non-expedited counterparts?  Intended for use within RCU.  Note
136  * that if the user specifies both rcu_expedit    128  * that if the user specifies both rcu_expedited and rcu_normal, then
137  * rcu_normal wins.  (Except during the time p    129  * rcu_normal wins.  (Except during the time period during boot from
138  * when the first task is spawned until the rc    130  * when the first task is spawned until the rcu_set_runtime_mode()
139  * core_initcall() is invoked, at which point     131  * core_initcall() is invoked, at which point everything is expedited.)
140  */                                               132  */
141 bool rcu_gp_is_normal(void)                       133 bool rcu_gp_is_normal(void)
142 {                                                 134 {
143         return READ_ONCE(rcu_normal) &&           135         return READ_ONCE(rcu_normal) &&
144                rcu_scheduler_active != RCU_SCH    136                rcu_scheduler_active != RCU_SCHEDULER_INIT;
145 }                                                 137 }
146 EXPORT_SYMBOL_GPL(rcu_gp_is_normal);              138 EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
147                                                   139 
148 static atomic_t rcu_async_hurry_nesting = ATOM << 
149 /*                                             << 
150  * Should call_rcu() callbacks be processed wi << 
151  * they OK being executed with arbitrary delay << 
152  */                                            << 
153 bool rcu_async_should_hurry(void)              << 
154 {                                              << 
155         return !IS_ENABLED(CONFIG_RCU_LAZY) || << 
156                atomic_read(&rcu_async_hurry_ne << 
157 }                                              << 
158 EXPORT_SYMBOL_GPL(rcu_async_should_hurry);     << 
159                                                << 
160 /**                                            << 
161  * rcu_async_hurry - Make future async RCU cal << 
162  *                                             << 
163  * After a call to this function, future calls << 
164  * will be processed in a timely fashion.      << 
165  */                                            << 
166 void rcu_async_hurry(void)                     << 
167 {                                              << 
168         if (IS_ENABLED(CONFIG_RCU_LAZY))       << 
169                 atomic_inc(&rcu_async_hurry_ne << 
170 }                                              << 
171 EXPORT_SYMBOL_GPL(rcu_async_hurry);            << 
172                                                << 
173 /**                                            << 
174  * rcu_async_relax - Make future async RCU cal << 
175  *                                             << 
176  * After a call to this function, future calls << 
177  * will be processed in a lazy fashion.        << 
178  */                                            << 
179 void rcu_async_relax(void)                     << 
180 {                                              << 
181         if (IS_ENABLED(CONFIG_RCU_LAZY))       << 
182                 atomic_dec(&rcu_async_hurry_ne << 
183 }                                              << 
184 EXPORT_SYMBOL_GPL(rcu_async_relax);            << 
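As a hedged illustration of the bracketing these two helpers are meant for (the caller and the work function below are hypothetical): per the comments above, call_rcu() callbacks posted between the two calls are processed in a timely rather than lazy fashion.

static void my_timely_callback_window(void)
{
	rcu_async_hurry();		/* future call_rcu() callbacks: not lazy */
	post_my_call_rcu_callbacks();	/* hypothetical work that queues callbacks */
	rcu_async_relax();		/* lazy (batched) processing allowed again */
}
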
185                                                << 
186 static atomic_t rcu_expedited_nesting = ATOMIC    140 static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
                                                   >> 141 
187 /*                                                142 /*
188  * Should normal grace-period primitives be ex    143  * Should normal grace-period primitives be expedited?  Intended for
189  * use within RCU.  Note that this function ta    144  * use within RCU.  Note that this function takes the rcu_expedited
190  * sysfs/boot variable and rcu_scheduler_activ    145  * sysfs/boot variable and rcu_scheduler_active into account as well
191  * as the rcu_expedite_gp() nesting.  So loopi    146  * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
192  * until rcu_gp_is_expedited() returns false i    147  * until rcu_gp_is_expedited() returns false is a -really- bad idea.
193  */                                               148  */
194 bool rcu_gp_is_expedited(void)                    149 bool rcu_gp_is_expedited(void)
195 {                                                 150 {
196         return rcu_expedited || atomic_read(&r !! 151         return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
                                                   >> 152                rcu_scheduler_active == RCU_SCHEDULER_INIT;
197 }                                                 153 }
198 EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);           154 EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
199                                                   155 
200 /**                                               156 /**
201  * rcu_expedite_gp - Expedite future RCU grace    157  * rcu_expedite_gp - Expedite future RCU grace periods
202  *                                                158  *
203  * After a call to this function, future calls    159  * After a call to this function, future calls to synchronize_rcu() and
204  * friends act as the corresponding synchroniz    160  * friends act as the corresponding synchronize_rcu_expedited() function
205  * had instead been called.                       161  * had instead been called.
206  */                                               162  */
207 void rcu_expedite_gp(void)                        163 void rcu_expedite_gp(void)
208 {                                                 164 {
209         atomic_inc(&rcu_expedited_nesting);       165         atomic_inc(&rcu_expedited_nesting);
210 }                                                 166 }
211 EXPORT_SYMBOL_GPL(rcu_expedite_gp);               167 EXPORT_SYMBOL_GPL(rcu_expedite_gp);
212                                                   168 
213 /**                                               169 /**
214  * rcu_unexpedite_gp - Cancel prior rcu_expedi    170  * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
215  *                                                171  *
216  * Undo a prior call to rcu_expedite_gp().  If    172  * Undo a prior call to rcu_expedite_gp().  If all prior calls to
217  * rcu_expedite_gp() are undone by a subsequen    173  * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
218  * and if the rcu_expedited sysfs/boot paramet    174  * and if the rcu_expedited sysfs/boot parameter is not set, then all
219  * subsequent calls to synchronize_rcu() and f    175  * subsequent calls to synchronize_rcu() and friends will return to
220  * their normal non-expedited behavior.           176  * their normal non-expedited behavior.
221  */                                               177  */
222 void rcu_unexpedite_gp(void)                      178 void rcu_unexpedite_gp(void)
223 {                                                 179 {
224         atomic_dec(&rcu_expedited_nesting);       180         atomic_dec(&rcu_expedited_nesting);
225 }                                                 181 }
226 EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);             182 EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
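To illustrate the intended pairing (a sketch with hypothetical caller names, not code from this file): a phase that issues many grace-period waits can be bracketed so that synchronize_rcu() behaves as if synchronize_rcu_expedited() had been called, as described in the comments above.

static void my_fast_reconfiguration(void)
{
	rcu_expedite_gp();	/* grace-period waits below are expedited */
	remove_old_entries();	/* hypothetical update-side work */
	synchronize_rcu();	/* acts like synchronize_rcu_expedited() */
	rcu_unexpedite_gp();	/* restore normal grace-period behavior */
}
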
227                                                   183 
228 static bool rcu_boot_ended __read_mostly;      << 
229                                                << 
230 /*                                                184 /*
231  * Inform RCU of the end of the in-kernel boot    185  * Inform RCU of the end of the in-kernel boot sequence.
232  */                                               186  */
233 void rcu_end_inkernel_boot(void)                  187 void rcu_end_inkernel_boot(void)
234 {                                                 188 {
235         rcu_unexpedite_gp();                      189         rcu_unexpedite_gp();
236         rcu_async_relax();                     << 
237         if (rcu_normal_after_boot)                190         if (rcu_normal_after_boot)
238                 WRITE_ONCE(rcu_normal, 1);        191                 WRITE_ONCE(rcu_normal, 1);
239         rcu_boot_ended = true;                 << 
240 }                                              << 
241                                                << 
242 /*                                             << 
243  * Let rcutorture know when it is OK to turn i << 
244  */                                            << 
245 bool rcu_inkernel_boot_has_ended(void)         << 
246 {                                              << 
247         return rcu_boot_ended;                 << 
248 }                                                 192 }
249 EXPORT_SYMBOL_GPL(rcu_inkernel_boot_has_ended) << 
250                                                   193 
251 #endif /* #ifndef CONFIG_TINY_RCU */              194 #endif /* #ifndef CONFIG_TINY_RCU */
252                                                   195 
253 /*                                                196 /*
254  * Test each non-SRCU synchronous grace-period    197  * Test each non-SRCU synchronous grace-period wait API.  This is
255  * useful just after a change in mode for thes    198  * useful just after a change in mode for these primitives, and
256  * during early boot.                             199  * during early boot.
257  */                                               200  */
258 void rcu_test_sync_prims(void)                    201 void rcu_test_sync_prims(void)
259 {                                                 202 {
260         if (!IS_ENABLED(CONFIG_PROVE_RCU))        203         if (!IS_ENABLED(CONFIG_PROVE_RCU))
261                 return;                           204                 return;
262         pr_info("Running RCU synchronous self  << 
263         synchronize_rcu();                        205         synchronize_rcu();
                                                   >> 206         synchronize_rcu_bh();
                                                   >> 207         synchronize_sched();
264         synchronize_rcu_expedited();              208         synchronize_rcu_expedited();
                                                   >> 209         synchronize_rcu_bh_expedited();
                                                   >> 210         synchronize_sched_expedited();
265 }                                                 211 }
266                                                   212 
267 #if !defined(CONFIG_TINY_RCU)                  !! 213 #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)
268                                                   214 
269 /*                                                215 /*
270  * Switch to run-time mode once RCU has fully     216  * Switch to run-time mode once RCU has fully initialized.
271  */                                               217  */
272 static int __init rcu_set_runtime_mode(void)      218 static int __init rcu_set_runtime_mode(void)
273 {                                                 219 {
274         rcu_test_sync_prims();                    220         rcu_test_sync_prims();
275         rcu_scheduler_active = RCU_SCHEDULER_R    221         rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
276         kfree_rcu_scheduler_running();         << 
277         rcu_test_sync_prims();                    222         rcu_test_sync_prims();
278         return 0;                                 223         return 0;
279 }                                                 224 }
280 core_initcall(rcu_set_runtime_mode);              225 core_initcall(rcu_set_runtime_mode);
281                                                   226 
282 #endif /* #if !defined(CONFIG_TINY_RCU) */     !! 227 #endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */
283                                                   228 
284 #ifdef CONFIG_DEBUG_LOCK_ALLOC                    229 #ifdef CONFIG_DEBUG_LOCK_ALLOC
285 static struct lock_class_key rcu_lock_key;        230 static struct lock_class_key rcu_lock_key;
286 struct lockdep_map rcu_lock_map = {            !! 231 struct lockdep_map rcu_lock_map =
287         .name = "rcu_read_lock",               !! 232         STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
288         .key = &rcu_lock_key,                  << 
289         .wait_type_outer = LD_WAIT_FREE,       << 
290         .wait_type_inner = LD_WAIT_CONFIG, /*  << 
291 };                                             << 
292 EXPORT_SYMBOL_GPL(rcu_lock_map);                  233 EXPORT_SYMBOL_GPL(rcu_lock_map);
293                                                   234 
294 static struct lock_class_key rcu_bh_lock_key;     235 static struct lock_class_key rcu_bh_lock_key;
295 struct lockdep_map rcu_bh_lock_map = {         !! 236 struct lockdep_map rcu_bh_lock_map =
296         .name = "rcu_read_lock_bh",            !! 237         STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
297         .key = &rcu_bh_lock_key,               << 
298         .wait_type_outer = LD_WAIT_FREE,       << 
299         .wait_type_inner = LD_WAIT_CONFIG, /*  << 
300 };                                             << 
301 EXPORT_SYMBOL_GPL(rcu_bh_lock_map);               238 EXPORT_SYMBOL_GPL(rcu_bh_lock_map);
302                                                   239 
303 static struct lock_class_key rcu_sched_lock_ke    240 static struct lock_class_key rcu_sched_lock_key;
304 struct lockdep_map rcu_sched_lock_map = {      !! 241 struct lockdep_map rcu_sched_lock_map =
305         .name = "rcu_read_lock_sched",         !! 242         STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
306         .key = &rcu_sched_lock_key,            << 
307         .wait_type_outer = LD_WAIT_FREE,       << 
308         .wait_type_inner = LD_WAIT_SPIN,       << 
309 };                                             << 
310 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);            243 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
311                                                   244 
312 // Tell lockdep when RCU callbacks are being i << 
313 static struct lock_class_key rcu_callback_key;    245 static struct lock_class_key rcu_callback_key;
314 struct lockdep_map rcu_callback_map =             246 struct lockdep_map rcu_callback_map =
315         STATIC_LOCKDEP_MAP_INIT("rcu_callback"    247         STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
316 EXPORT_SYMBOL_GPL(rcu_callback_map);              248 EXPORT_SYMBOL_GPL(rcu_callback_map);
317                                                   249 
318 noinstr int notrace debug_lockdep_rcu_enabled( !! 250 int notrace debug_lockdep_rcu_enabled(void)
319 {                                                 251 {
320         return rcu_scheduler_active != RCU_SCH !! 252         return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
321                current->lockdep_recursion == 0    253                current->lockdep_recursion == 0;
322 }                                                 254 }
323 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);     255 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
324                                                   256 
325 /**                                               257 /**
326  * rcu_read_lock_held() - might we be in RCU r    258  * rcu_read_lock_held() - might we be in RCU read-side critical section?
327  *                                                259  *
328  * If CONFIG_DEBUG_LOCK_ALLOC is selected, ret    260  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
329  * read-side critical section.  In absence of     261  * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
330  * this assumes we are in an RCU read-side cri    262  * this assumes we are in an RCU read-side critical section unless it can
331  * prove otherwise.  This is useful for debug     263  * prove otherwise.  This is useful for debug checks in functions that
332  * require that they be called within an RCU r    264  * require that they be called within an RCU read-side critical section.
333  *                                                265  *
334  * Checks debug_lockdep_rcu_enabled() to preve    266  * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
335  * and while lockdep is disabled.                 267  * and while lockdep is disabled.
336  *                                                268  *
337  * Note that rcu_read_lock() and the matching     269  * Note that rcu_read_lock() and the matching rcu_read_unlock() must
338  * occur in the same context, for example, it     270  * occur in the same context, for example, it is illegal to invoke
339  * rcu_read_unlock() in process context if the    271  * rcu_read_unlock() in process context if the matching rcu_read_lock()
340  * was invoked from within an irq handler.        272  * was invoked from within an irq handler.
341  *                                                273  *
342  * Note that rcu_read_lock() is disallowed if     274  * Note that rcu_read_lock() is disallowed if the CPU is either idle or
343  * offline from an RCU perspective, so check f    275  * offline from an RCU perspective, so check for those as well.
344  */                                               276  */
345 int rcu_read_lock_held(void)                      277 int rcu_read_lock_held(void)
346 {                                                 278 {
347         bool ret;                              !! 279         if (!debug_lockdep_rcu_enabled())
348                                                !! 280                 return 1;
349         if (rcu_read_lock_held_common(&ret))   !! 281         if (!rcu_is_watching())
350                 return ret;                    !! 282                 return 0;
                                                   >> 283         if (!rcu_lockdep_current_cpu_online())
                                                   >> 284                 return 0;
351         return lock_is_held(&rcu_lock_map);       285         return lock_is_held(&rcu_lock_map);
352 }                                                 286 }
353 EXPORT_SYMBOL_GPL(rcu_read_lock_held);            287 EXPORT_SYMBOL_GPL(rcu_read_lock_held);
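A small sketch (hypothetical structure and lock names) of how this check is typically consumed: rcu_dereference_check() folds rcu_read_lock_held() in with an update-side condition, so the accessor may be used either inside rcu_read_lock() or with the updater's lock held.

struct my_table {
	spinlock_t lock;
	struct my_node __rcu *node;
};

static struct my_node *my_table_node(struct my_table *t)
{
	return rcu_dereference_check(t->node,
				     lockdep_is_held(&t->lock));
}
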
354                                                   288 
355 /**                                               289 /**
356  * rcu_read_lock_bh_held() - might we be in RC    290  * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
357  *                                                291  *
358  * Check for bottom half being disabled, which    292  * Check for bottom half being disabled, which covers both the
359  * CONFIG_PROVE_RCU and not cases.  Note that     293  * CONFIG_PROVE_RCU and not cases.  Note that if someone uses
360  * rcu_read_lock_bh(), but then later enables     294  * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
361  * will show the situation.  This is useful fo    295  * will show the situation.  This is useful for debug checks in functions
362  * that require that they be called within an     296  * that require that they be called within an RCU read-side critical
363  * section.                                       297  * section.
364  *                                                298  *
365  * Check debug_lockdep_rcu_enabled() to preven    299  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
366  *                                                300  *
367  * Note that rcu_read_lock_bh() is disallowed  !! 301  * Note that rcu_read_lock() is disallowed if the CPU is either idle or
368  * offline from an RCU perspective, so check f    302  * offline from an RCU perspective, so check for those as well.
369  */                                               303  */
370 int rcu_read_lock_bh_held(void)                   304 int rcu_read_lock_bh_held(void)
371 {                                                 305 {
372         bool ret;                              !! 306         if (!debug_lockdep_rcu_enabled())
373                                                !! 307                 return 1;
374         if (rcu_read_lock_held_common(&ret))   !! 308         if (!rcu_is_watching())
375                 return ret;                    !! 309                 return 0;
                                                   >> 310         if (!rcu_lockdep_current_cpu_online())
                                                   >> 311                 return 0;
376         return in_softirq() || irqs_disabled()    312         return in_softirq() || irqs_disabled();
377 }                                                 313 }
378 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);         314 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
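Similarly, a hedged sketch of a BH-context accessor (my_stats and my_stats_ptr are hypothetical): the check passes whenever bottom halves are disabled, for example in softirq context or under rcu_read_lock_bh().

/* Hypothetical accessor meant to run with BH disabled. */
static struct my_stats *my_stats_get(void)
{
	WARN_ON_ONCE(!rcu_read_lock_bh_held());
	return rcu_dereference_bh(my_stats_ptr);
}
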
379                                                   315 
380 int rcu_read_lock_any_held(void)               << 
381 {                                              << 
382         bool ret;                              << 
383                                                << 
384         if (rcu_read_lock_held_common(&ret))   << 
385                 return ret;                    << 
386         if (lock_is_held(&rcu_lock_map) ||     << 
387             lock_is_held(&rcu_bh_lock_map) ||  << 
388             lock_is_held(&rcu_sched_lock_map)) << 
389                 return 1;                      << 
390         return !preemptible();                 << 
391 }                                              << 
392 EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);     << 
393                                                << 
394 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */       316 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
395                                                   317 
396 /**                                               318 /**
397  * wakeme_after_rcu() - Callback function to a    319  * wakeme_after_rcu() - Callback function to awaken a task after grace period
398  * @head: Pointer to rcu_head member within rc    320  * @head: Pointer to rcu_head member within rcu_synchronize structure
399  *                                                321  *
400  * Awaken the corresponding task now that a gr    322  * Awaken the corresponding task now that a grace period has elapsed.
401  */                                               323  */
402 void wakeme_after_rcu(struct rcu_head *head)      324 void wakeme_after_rcu(struct rcu_head *head)
403 {                                                 325 {
404         struct rcu_synchronize *rcu;              326         struct rcu_synchronize *rcu;
405                                                   327 
406         rcu = container_of(head, struct rcu_sy    328         rcu = container_of(head, struct rcu_synchronize, head);
407         complete(&rcu->completion);               329         complete(&rcu->completion);
408 }                                                 330 }
409 EXPORT_SYMBOL_GPL(wakeme_after_rcu);              331 EXPORT_SYMBOL_GPL(wakeme_after_rcu);
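For illustration, a sketch of the synchronous-wait pattern built on this callback; this is essentially what __wait_rcu_gp() below does for each grace-period flavor, and production code should simply call synchronize_rcu() rather than open-coding it.

static void my_wait_for_one_grace_period(void)
{
	struct rcu_synchronize rs;

	init_rcu_head_on_stack(&rs.head);
	init_completion(&rs.completion);
	call_rcu(&rs.head, wakeme_after_rcu);	/* wake us after a grace period */
	wait_for_completion(&rs.completion);	/* block until the callback runs */
	destroy_rcu_head_on_stack(&rs.head);
}
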
410                                                   332 
411 void __wait_rcu_gp(bool checktiny, unsigned in !! 333 void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
412                    struct rcu_synchronize *rs_    334                    struct rcu_synchronize *rs_array)
413 {                                                 335 {
414         int i;                                    336         int i;
415         int j;                                    337         int j;
416                                                   338 
417         /* Initialize and register callbacks f !! 339         /* Initialize and register callbacks for each flavor specified. */
418         for (i = 0; i < n; i++) {                 340         for (i = 0; i < n; i++) {
419                 if (checktiny &&                  341                 if (checktiny &&
420                     (crcu_array[i] == call_rcu !! 342                     (crcu_array[i] == call_rcu ||
                                                   >> 343                      crcu_array[i] == call_rcu_bh)) {
421                         might_sleep();            344                         might_sleep();
422                         continue;                 345                         continue;
423                 }                                 346                 }
                                                   >> 347                 init_rcu_head_on_stack(&rs_array[i].head);
                                                   >> 348                 init_completion(&rs_array[i].completion);
424                 for (j = 0; j < i; j++)           349                 for (j = 0; j < i; j++)
425                         if (crcu_array[j] == c    350                         if (crcu_array[j] == crcu_array[i])
426                                 break;            351                                 break;
427                 if (j == i) {                  !! 352                 if (j == i)
428                         init_rcu_head_on_stack << 
429                         init_completion(&rs_ar << 
430                         (crcu_array[i])(&rs_ar    353                         (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
431                 }                              << 
432         }                                         354         }
433                                                   355 
434         /* Wait for all callbacks to be invoke    356         /* Wait for all callbacks to be invoked. */
435         for (i = 0; i < n; i++) {                 357         for (i = 0; i < n; i++) {
436                 if (checktiny &&                  358                 if (checktiny &&
437                     (crcu_array[i] == call_rcu !! 359                     (crcu_array[i] == call_rcu ||
                                                   >> 360                      crcu_array[i] == call_rcu_bh))
438                         continue;                 361                         continue;
439                 for (j = 0; j < i; j++)           362                 for (j = 0; j < i; j++)
440                         if (crcu_array[j] == c    363                         if (crcu_array[j] == crcu_array[i])
441                                 break;            364                                 break;
442                 if (j == i) {                  !! 365                 if (j == i)
443                         wait_for_completion_st !! 366                         wait_for_completion(&rs_array[i].completion);
444                         destroy_rcu_head_on_st !! 367                 destroy_rcu_head_on_stack(&rs_array[i].head);
445                 }                              << 
446         }                                         368         }
447 }                                                 369 }
448 EXPORT_SYMBOL_GPL(__wait_rcu_gp);                 370 EXPORT_SYMBOL_GPL(__wait_rcu_gp);
449                                                   371 
450 void finish_rcuwait(struct rcuwait *w)         << 
451 {                                              << 
452         rcu_assign_pointer(w->task, NULL);     << 
453         __set_current_state(TASK_RUNNING);     << 
454 }                                              << 
455 EXPORT_SYMBOL_GPL(finish_rcuwait);             << 
456                                                << 
457 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD              372 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
458 void init_rcu_head(struct rcu_head *head)         373 void init_rcu_head(struct rcu_head *head)
459 {                                                 374 {
460         debug_object_init(head, &rcuhead_debug    375         debug_object_init(head, &rcuhead_debug_descr);
461 }                                                 376 }
462 EXPORT_SYMBOL_GPL(init_rcu_head);                 377 EXPORT_SYMBOL_GPL(init_rcu_head);
463                                                   378 
464 void destroy_rcu_head(struct rcu_head *head)      379 void destroy_rcu_head(struct rcu_head *head)
465 {                                                 380 {
466         debug_object_free(head, &rcuhead_debug    381         debug_object_free(head, &rcuhead_debug_descr);
467 }                                                 382 }
468 EXPORT_SYMBOL_GPL(destroy_rcu_head);              383 EXPORT_SYMBOL_GPL(destroy_rcu_head);
469                                                   384 
470 static bool rcuhead_is_static_object(void *add    385 static bool rcuhead_is_static_object(void *addr)
471 {                                                 386 {
472         return true;                              387         return true;
473 }                                                 388 }
474                                                   389 
475 /**                                               390 /**
476  * init_rcu_head_on_stack() - initialize on-st    391  * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
477  * @head: pointer to rcu_head structure to be     392  * @head: pointer to rcu_head structure to be initialized
478  *                                                393  *
479  * This function informs debugobjects of a new    394  * This function informs debugobjects of a new rcu_head structure that
480  * has been allocated as an auto variable on t    395  * has been allocated as an auto variable on the stack.  This function
481  * is not required for rcu_head structures tha    396  * is not required for rcu_head structures that are statically defined or
482  * that are dynamically allocated on the heap.    397  * that are dynamically allocated on the heap.  This function has no
483  * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD k    398  * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
484  */                                               399  */
485 void init_rcu_head_on_stack(struct rcu_head *h    400 void init_rcu_head_on_stack(struct rcu_head *head)
486 {                                                 401 {
487         debug_object_init_on_stack(head, &rcuh    402         debug_object_init_on_stack(head, &rcuhead_debug_descr);
488 }                                                 403 }
489 EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);        404 EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);
490                                                   405 
491 /**                                               406 /**
492  * destroy_rcu_head_on_stack() - destroy on-st    407  * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
493  * @head: pointer to rcu_head structure to be     408  * @head: pointer to rcu_head structure to be initialized
494  *                                                409  *
495  * This function informs debugobjects that an     410  * This function informs debugobjects that an on-stack rcu_head structure
496  * is about to go out of scope.  As with init_    411  * is about to go out of scope.  As with init_rcu_head_on_stack(), this
497  * function is not required for rcu_head struc    412  * function is not required for rcu_head structures that are statically
498  * defined or that are dynamically allocated o    413  * defined or that are dynamically allocated on the heap.  Also as with
499  * init_rcu_head_on_stack(), this function has    414  * init_rcu_head_on_stack(), this function has no effect for
500  * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel build    415  * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
501  */                                               416  */
502 void destroy_rcu_head_on_stack(struct rcu_head    417 void destroy_rcu_head_on_stack(struct rcu_head *head)
503 {                                                 418 {
504         debug_object_free(head, &rcuhead_debug    419         debug_object_free(head, &rcuhead_debug_descr);
505 }                                                 420 }
506 EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);     421 EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
507                                                   422 
508 const struct debug_obj_descr rcuhead_debug_des !! 423 struct debug_obj_descr rcuhead_debug_descr = {
509         .name = "rcu_head",                       424         .name = "rcu_head",
510         .is_static_object = rcuhead_is_static_    425         .is_static_object = rcuhead_is_static_object,
511 };                                                426 };
512 EXPORT_SYMBOL_GPL(rcuhead_debug_descr);           427 EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
513 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD    428 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
514                                                   429 
515 #if defined(CONFIG_TREE_RCU) || defined(CONFIG !! 430 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
516 void do_trace_rcu_torture_read(const char *rcu    431 void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
517                                unsigned long s    432                                unsigned long secs,
518                                unsigned long c    433                                unsigned long c_old, unsigned long c)
519 {                                                 434 {
520         trace_rcu_torture_read(rcutorturename,    435         trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
521 }                                                 436 }
522 EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);     437 EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
523 #else                                             438 #else
524 #define do_trace_rcu_torture_read(rcutorturena    439 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
525         do { } while (0)                          440         do { } while (0)
526 #endif                                            441 #endif
527                                                   442 
528 #if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_ !! 443 #ifdef CONFIG_RCU_STALL_COMMON
529 /* Get rcutorture access to sched_setaffinity( << 
530 long torture_sched_setaffinity(pid_t pid, cons << 
531 {                                              << 
532         int ret;                               << 
533                                                   444 
534         ret = sched_setaffinity(pid, in_mask); !! 445 #ifdef CONFIG_PROVE_RCU
535         WARN_ONCE(ret, "%s: sched_setaffinity( !! 446 #define RCU_STALL_DELAY_DELTA          (5 * HZ)
536         return ret;                            !! 447 #else
537 }                                              !! 448 #define RCU_STALL_DELAY_DELTA          0
538 EXPORT_SYMBOL_GPL(torture_sched_setaffinity);  << 
539 #endif                                            449 #endif
540                                                   450 
541 int rcu_cpu_stall_notifiers __read_mostly; //  !! 451 int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
542 EXPORT_SYMBOL_GPL(rcu_cpu_stall_notifiers);    << 
543                                                << 
544 #ifdef CONFIG_RCU_STALL_COMMON                 << 
545 int rcu_cpu_stall_ftrace_dump __read_mostly;   << 
546 module_param(rcu_cpu_stall_ftrace_dump, int, 0 << 
547 #ifdef CONFIG_RCU_CPU_STALL_NOTIFIER           << 
548 module_param(rcu_cpu_stall_notifiers, int, 044 << 
549 #endif // #ifdef CONFIG_RCU_CPU_STALL_NOTIFIER << 
550 int rcu_cpu_stall_suppress __read_mostly; // ! << 
551 EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);        452 EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
                                                   >> 453 static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
                                                   >> 454 
552 module_param(rcu_cpu_stall_suppress, int, 0644    455 module_param(rcu_cpu_stall_suppress, int, 0644);
553 int rcu_cpu_stall_timeout __read_mostly = CONF << 
554 module_param(rcu_cpu_stall_timeout, int, 0644)    456 module_param(rcu_cpu_stall_timeout, int, 0644);
555 int rcu_exp_cpu_stall_timeout __read_mostly =  !! 457 
556 module_param(rcu_exp_cpu_stall_timeout, int, 0 !! 458 int rcu_jiffies_till_stall_check(void)
557 int rcu_cpu_stall_cputime __read_mostly = IS_E !! 459 {
558 module_param(rcu_cpu_stall_cputime, int, 0644) !! 460         int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
559 bool rcu_exp_stall_task_details __read_mostly; !! 461 
560 module_param(rcu_exp_stall_task_details, bool, !! 462         /*
                                                   >> 463          * Limit check must be consistent with the Kconfig limits
                                                   >> 464          * for CONFIG_RCU_CPU_STALL_TIMEOUT.
                                                   >> 465          */
                                                   >> 466         if (till_stall_check < 3) {
                                                   >> 467                 WRITE_ONCE(rcu_cpu_stall_timeout, 3);
                                                   >> 468                 till_stall_check = 3;
                                                   >> 469         } else if (till_stall_check > 300) {
                                                   >> 470                 WRITE_ONCE(rcu_cpu_stall_timeout, 300);
                                                   >> 471                 till_stall_check = 300;
                                                   >> 472         }
                                                   >> 473         return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
                                                   >> 474 }
                                                   >> 475 
                                                   >> 476 void rcu_sysrq_start(void)
                                                   >> 477 {
                                                   >> 478         if (!rcu_cpu_stall_suppress)
                                                   >> 479                 rcu_cpu_stall_suppress = 2;
                                                   >> 480 }
                                                   >> 481 
                                                   >> 482 void rcu_sysrq_end(void)
                                                   >> 483 {
                                                   >> 484         if (rcu_cpu_stall_suppress == 2)
                                                   >> 485                 rcu_cpu_stall_suppress = 0;
                                                   >> 486 }
                                                   >> 487 
                                                   >> 488 static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
                                                   >> 489 {
                                                   >> 490         rcu_cpu_stall_suppress = 1;
                                                   >> 491         return NOTIFY_DONE;
                                                   >> 492 }
                                                   >> 493 
                                                   >> 494 static struct notifier_block rcu_panic_block = {
                                                   >> 495         .notifier_call = rcu_panic,
                                                   >> 496 };
                                                   >> 497 
                                                   >> 498 static int __init check_cpu_stall_init(void)
                                                   >> 499 {
                                                   >> 500         atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
                                                   >> 501         return 0;
                                                   >> 502 }
                                                   >> 503 early_initcall(check_cpu_stall_init);
                                                   >> 504 
561 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */       505 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
562                                                   506 
563 // Suppress boot-time RCU CPU stall warnings a !! 507 #ifdef CONFIG_TASKS_RCU
564 // warnings.  Also used by rcutorture even if  << 
565 int rcu_cpu_stall_suppress_at_boot __read_most << 
566 EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress_at_bo << 
567 module_param(rcu_cpu_stall_suppress_at_boot, i << 
568                                                   508 
569 /**                                            !! 509 /*
570  * get_completed_synchronize_rcu - Return a pr !! 510  * Simple variant of RCU whose quiescent states are voluntary context switch,
571  *                                             !! 511  * user-space execution, and idle.  As such, grace periods can take one good
572  * Returns a value that will always be treated !! 512  * long time.  There are no read-side primitives similar to rcu_read_lock()
573  * poll_state_synchronize_rcu() as a cookie wh !! 513  * and rcu_read_unlock() because this implementation is intended to get
574  * completed.                                  !! 514  * the system into a safe state for some of the manipulations involved in
                                                   >> 515  * tracing and the like.  Finally, this implementation does not support
                                                   >> 516  * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
                                                   >> 517  * per-CPU callback lists will be needed.
                                                   >> 518  */
                                                   >> 519 
                                                   >> 520 /* Global list of callbacks and associated lock. */
                                                   >> 521 static struct rcu_head *rcu_tasks_cbs_head;
                                                   >> 522 static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
                                                   >> 523 static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
                                                   >> 524 static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
                                                   >> 525 
                                                   >> 526 /* Track exiting tasks in order to allow them to be waited for. */
                                                   >> 527 DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
                                                   >> 528 
                                                   >> 529 /* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
                                                   >> 530 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
                                                   >> 531 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
                                                   >> 532 module_param(rcu_task_stall_timeout, int, 0644);
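/*
 * Illustrative note (not part of this file): assuming the "rcupdate."
 * module-parameter prefix that this file is normally built with, the
 * stall timeout can be set on the kernel command line, for example:
 *
 *	rcupdate.rcu_task_stall_timeout=3000
 *
 * The value is in jiffies, per the comment above; a value <= 0
 * disables RCU-tasks stall warnings entirely.
 */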
                                                   >> 533 
                                                   >> 534 static struct task_struct *rcu_tasks_kthread_ptr;
                                                   >> 535 
                                                   >> 536 /**
                                                   >> 537  * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
                                                   >> 538  * @rhp: structure to be used for queueing the RCU updates.
                                                   >> 539  * @func: actual callback function to be invoked after the grace period
                                                   >> 540  *
                                                   >> 541  * The callback function will be invoked some time after a full grace
                                                   >> 542  * period elapses, in other words after all currently executing RCU
                                                   >> 543  * read-side critical sections have completed. call_rcu_tasks() assumes
                                                   >> 544  * that the read-side critical sections end at a voluntary context
                                                   >> 545  * switch (not a preemption!), entry into idle, or transition to usermode
                                                   >> 546  * execution.  As such, there are no read-side primitives analogous to
                                                   >> 547  * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
                                                   >> 548  * to determine that all tasks have passed through a safe state, not so
                                                   >> 549  * much for data-structure synchronization.
                                                   >> 550  *
                                                   >> 551  * See the description of call_rcu() for more detailed information on
                                                   >> 552  * memory ordering guarantees.
                                                   >> 553  */
                                                   >> 554 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
                                                   >> 555 {
                                                   >> 556         unsigned long flags;
                                                   >> 557         bool needwake;
                                                   >> 558 
                                                   >> 559         rhp->next = NULL;
                                                   >> 560         rhp->func = func;
                                                   >> 561         raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
                                                   >> 562         needwake = !rcu_tasks_cbs_head;
                                                   >> 563         *rcu_tasks_cbs_tail = rhp;
                                                   >> 564         rcu_tasks_cbs_tail = &rhp->next;
                                                   >> 565         raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
                                                   >> 566         /* We can't create the thread unless interrupts are enabled. */
                                                   >> 567         if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
                                                   >> 568                 wake_up(&rcu_tasks_cbs_wq);
                                                   >> 569 }
                                                   >> 570 EXPORT_SYMBOL_GPL(call_rcu_tasks);
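/*
 * Illustrative sketch (not part of this file): a typical caller embeds
 * an rcu_head in its own structure, queues it with call_rcu_tasks(),
 * and frees the enclosing object from the callback, once every task
 * has passed through a voluntary context switch, usermode execution,
 * or idle.  The struct and function names below are hypothetical.
 */
struct old_trampoline {
	void *text;
	struct rcu_head rh;
};

static void old_trampoline_free(struct rcu_head *rhp)
{
	struct old_trampoline *otp = container_of(rhp, struct old_trampoline, rh);

	kfree(otp->text);
	kfree(otp);
}

static void old_trampoline_retire(struct old_trampoline *otp)
{
	/* After the grace period, no task can still be executing in otp->text. */
	call_rcu_tasks(&otp->rh, old_trampoline_free);
}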
                                                   >> 571 
                                                   >> 572 /**
                                                   >> 573  * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
                                                   >> 574  *
                                                   >> 575  * Control will return to the caller some time after a full rcu-tasks
                                                   >> 576  * grace period has elapsed, in other words after all currently
                                                   >> 577  * executing rcu-tasks read-side critical sections have completed.  These
                                                   >> 578  * read-side critical sections are delimited by calls to schedule(),
                                                   >> 579  * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
                                                   >> 580  * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
                                                   >> 581  *
                                                   >> 582  * This is a very specialized primitive, intended only for a few uses in
                                                   >> 583  * tracing and other situations requiring manipulation of function
                                                   >> 584  * preambles and profiling hooks.  The synchronize_rcu_tasks() function
                                                   >> 585  * is not (yet) intended for heavy use from multiple CPUs.
                                                   >> 586  *
                                                   >> 587  * Note that this guarantee implies further memory-ordering guarantees.
                                                   >> 588  * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
                                                   >> 589  * each CPU is guaranteed to have executed a full memory barrier since the
                                                   >> 590  * end of its last RCU-tasks read-side critical section whose beginning
                                                   >> 591  * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
                                                   >> 592  * having an RCU-tasks read-side critical section that extends beyond
                                                   >> 593  * the return from synchronize_rcu_tasks() is guaranteed to have executed
                                                   >> 594  * a full memory barrier after the beginning of synchronize_rcu_tasks()
                                                   >> 595  * and before the beginning of that RCU-tasks read-side critical section.
                                                   >> 596  * Note that these guarantees include CPUs that are offline, idle, or
                                                   >> 597  * executing in user mode, as well as CPUs that are executing in the kernel.
                                                   >> 598  *
                                                   >> 599  * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
                                                   >> 600  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
                                                   >> 601  * to have executed a full memory barrier during the execution of
                                                   >> 602  * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
                                                   >> 603  * (but again only if the system has more than one CPU).
                                                   >> 604  */
                                                   >> 605 void synchronize_rcu_tasks(void)
                                                   >> 606 {
                                                   >> 607         /* Complain if the scheduler has not started.  */
                                                   >> 608         RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
                                                   >> 609                          "synchronize_rcu_tasks called too soon");
                                                   >> 610 
                                                   >> 611         /* Wait for the grace period. */
                                                   >> 612         wait_rcu_gp(call_rcu_tasks);
                                                   >> 613 }
                                                   >> 614 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
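/*
 * Illustrative sketch (not part of this file): the synchronous form is
 * typically used when unhooking a tracing trampoline or function
 * preamble.  Once synchronize_rcu_tasks() returns, no task can still
 * be executing in the old code, so it may be freed directly.  The
 * helper names below are hypothetical.
 */
static void retire_trampoline_sync(void *tramp)
{
	unhook_trampoline(tramp);	/* hypothetical: stop new entries into tramp */
	synchronize_rcu_tasks();	/* wait out tasks already executing in it */
	free_trampoline(tramp);		/* hypothetical: now safe to free */
}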
                                                   >> 615 
                                                   >> 616 /**
                                                   >> 617  * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
                                                   >> 618  *
                                                   >> 619  * Although the current implementation is guaranteed to wait, it is not
                                                   >> 620  * obligated to do so, for example, if there are no pending callbacks.
                                                   >> 621  */
                                                   >> 622 void rcu_barrier_tasks(void)
                                                   >> 623 {
                                                   >> 624         /* There is only one callback queue, so this is easy.  ;-) */
                                                   >> 625         synchronize_rcu_tasks();
                                                   >> 626 }
                                                   >> 627 EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
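/*
 * Illustrative sketch (not part of this file): code that queued
 * callbacks with call_rcu_tasks() should wait for them before its
 * callback functions go away, for example on teardown.  The function
 * name below is hypothetical.
 */
static void my_subsystem_teardown(void)
{
	/* ... stop queueing new call_rcu_tasks() callbacks ... */
	rcu_barrier_tasks();
	/* All previously queued callbacks have now been invoked. */
}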
                                                   >> 628 
                                                   >> 629 /* See if tasks are still holding out, complain if so. */
                                                   >> 630 static void check_holdout_task(struct task_struct *t,
                                                   >> 631                                bool needreport, bool *firstreport)
                                                   >> 632 {
                                                   >> 633         int cpu;
                                                   >> 634 
                                                   >> 635         if (!READ_ONCE(t->rcu_tasks_holdout) ||
                                                   >> 636             t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
                                                   >> 637             !READ_ONCE(t->on_rq) ||
                                                   >> 638             (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
                                                   >> 639              !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
                                                   >> 640                 WRITE_ONCE(t->rcu_tasks_holdout, false);
                                                   >> 641                 list_del_init(&t->rcu_tasks_holdout_list);
                                                   >> 642                 put_task_struct(t);
                                                   >> 643                 return;
                                                   >> 644         }
                                                   >> 645         rcu_request_urgent_qs_task(t);
                                                   >> 646         if (!needreport)
                                                   >> 647                 return;
                                                   >> 648         if (*firstreport) {
                                                   >> 649                 pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
                                                   >> 650                 *firstreport = false;
                                                   >> 651         }
                                                   >> 652         cpu = task_cpu(t);
                                                   >> 653         pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
                                                   >> 654                  t, ".I"[is_idle_task(t)],
                                                   >> 655                  "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
                                                   >> 656                  t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
                                                   >> 657                  t->rcu_tasks_idle_cpu, cpu);
                                                   >> 658         sched_show_task(t);
                                                   >> 659 }
                                                   >> 660 
                                                   >> 661 /* RCU-tasks kthread that detects grace periods and invokes callbacks. */
                                                   >> 662 static int __noreturn rcu_tasks_kthread(void *arg)
                                                   >> 663 {
                                                   >> 664         unsigned long flags;
                                                   >> 665         struct task_struct *g, *t;
                                                   >> 666         unsigned long lastreport;
                                                   >> 667         struct rcu_head *list;
                                                   >> 668         struct rcu_head *next;
                                                   >> 669         LIST_HEAD(rcu_tasks_holdouts);
                                                   >> 670 
                                                   >> 671         /* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
                                                   >> 672         housekeeping_affine(current, HK_FLAG_RCU);
                                                   >> 673 
                                                   >> 674         /*
                                                   >> 675          * Each pass through the following loop makes one check for
                                                   >> 676          * newly arrived callbacks, and, if there are some, waits for
                                                   >> 677          * one RCU-tasks grace period and then invokes the callbacks.
                                                   >> 678          * This loop is terminated by the system going down.  ;-)
                                                   >> 679          */
                                                   >> 680         for (;;) {
                                                   >> 681 
                                                   >> 682                 /* Pick up any new callbacks. */
                                                   >> 683                 raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
                                                   >> 684                 list = rcu_tasks_cbs_head;
                                                   >> 685                 rcu_tasks_cbs_head = NULL;
                                                   >> 686                 rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
                                                   >> 687                 raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
                                                   >> 688 
                                                   >> 689                 /* If there were none, wait a bit and start over. */
                                                   >> 690                 if (!list) {
                                                   >> 691                         wait_event_interruptible(rcu_tasks_cbs_wq,
                                                   >> 692                                                  rcu_tasks_cbs_head);
                                                   >> 693                         if (!rcu_tasks_cbs_head) {
                                                   >> 694                                 WARN_ON(signal_pending(current));
                                                   >> 695                                 schedule_timeout_interruptible(HZ/10);
                                                   >> 696                         }
                                                   >> 697                         continue;
                                                   >> 698                 }
                                                   >> 699 
                                                   >> 700                 /*
                                                   >> 701                  * Wait for all pre-existing t->on_rq and t->nvcsw
                                                   >> 702                  * transitions to complete.  Invoking synchronize_sched()
                                                   >> 703                  * suffices because all these transitions occur with
                                                   >> 704                  * interrupts disabled.  Without this synchronize_sched(),
                                                   >> 705                  * a read-side critical section that started before the
                                                   >> 706                  * grace period might be incorrectly seen as having started
                                                   >> 707                  * after the grace period.
                                                   >> 708                  *
                                                   >> 709                  * This synchronize_sched() also dispenses with the
                                                   >> 710                  * need for a memory barrier on the first store to
                                                   >> 711                  * ->rcu_tasks_holdout, as it forces the store to happen
                                                   >> 712                  * after the beginning of the grace period.
                                                   >> 713                  */
                                                   >> 714                 synchronize_sched();
                                                   >> 715 
                                                   >> 716                 /*
                                                   >> 717                  * There were callbacks, so we need to wait for an
                                                   >> 718                  * RCU-tasks grace period.  Start off by scanning
                                                   >> 719                  * the task list for tasks that are not already
                                                   >> 720                  * voluntarily blocked.  Mark these tasks and make
                                                   >> 721                  * a list of them in rcu_tasks_holdouts.
                                                   >> 722                  */
                                                   >> 723                 rcu_read_lock();
                                                   >> 724                 for_each_process_thread(g, t) {
                                                   >> 725                         if (t != current && READ_ONCE(t->on_rq) &&
                                                   >> 726                             !is_idle_task(t)) {
                                                   >> 727                                 get_task_struct(t);
                                                   >> 728                                 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
                                                   >> 729                                 WRITE_ONCE(t->rcu_tasks_holdout, true);
                                                   >> 730                                 list_add(&t->rcu_tasks_holdout_list,
                                                   >> 731                                          &rcu_tasks_holdouts);
                                                   >> 732                         }
                                                   >> 733                 }
                                                   >> 734                 rcu_read_unlock();
                                                   >> 735 
                                                   >> 736                 /*
                                                   >> 737                  * Wait for tasks that are in the process of exiting.
                                                   >> 738                  * This does only part of the job, ensuring that all
                                                   >> 739                  * tasks that were previously exiting reach the point
                                                   >> 740                  * where they have disabled preemption, allowing the
                                                   >> 741                  * later synchronize_sched() to finish the job.
                                                   >> 742                  */
                                                   >> 743                 synchronize_srcu(&tasks_rcu_exit_srcu);
                                                   >> 744 
                                                   >> 745                 /*
                                                   >> 746                  * Each pass through the following loop scans the list
                                                   >> 747                  * of holdout tasks, removing any that are no longer
                                                   >> 748                  * holdouts.  When the list is empty, we are done.
                                                   >> 749                  */
                                                   >> 750                 lastreport = jiffies;
                                                   >> 751                 while (!list_empty(&rcu_tasks_holdouts)) {
                                                   >> 752                         bool firstreport;
                                                   >> 753                         bool needreport;
                                                   >> 754                         int rtst;
                                                   >> 755                         struct task_struct *t1;
                                                   >> 756 
                                                   >> 757                         schedule_timeout_interruptible(HZ);
                                                   >> 758                         rtst = READ_ONCE(rcu_task_stall_timeout);
                                                   >> 759                         needreport = rtst > 0 &&
                                                   >> 760                                      time_after(jiffies, lastreport + rtst);
                                                   >> 761                         if (needreport)
                                                   >> 762                                 lastreport = jiffies;
                                                   >> 763                         firstreport = true;
                                                   >> 764                         WARN_ON(signal_pending(current));
                                                   >> 765                         list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
                                                   >> 766                                                 rcu_tasks_holdout_list) {
                                                   >> 767                                 check_holdout_task(t, needreport, &firstreport);
                                                   >> 768                                 cond_resched();
                                                   >> 769                         }
                                                   >> 770                 }
                                                   >> 771 
                                                   >> 772                 /*
                                                   >> 773                  * Because ->on_rq and ->nvcsw are not guaranteed
                                                   >> 774  * to have full memory barriers prior to them in the
                                                   >> 775                  * schedule() path, memory reordering on other CPUs could
                                                   >> 776                  * cause their RCU-tasks read-side critical sections to
                                                   >> 777                  * extend past the end of the grace period.  However,
                                                   >> 778                  * because these ->nvcsw updates are carried out with
                                                   >> 779                  * interrupts disabled, we can use synchronize_sched()
                                                   >> 780                  * to force the needed ordering on all such CPUs.
                                                   >> 781                  *
                                                   >> 782                  * This synchronize_sched() also confines all
                                                   >> 783                  * ->rcu_tasks_holdout accesses to be within the grace
                                                   >> 784                  * period, avoiding the need for memory barriers for
                                                   >> 785                  * ->rcu_tasks_holdout accesses.
                                                   >> 786                  *
                                                   >> 787                  * In addition, this synchronize_sched() waits for exiting
                                                   >> 788                  * tasks to complete their final preempt_disable() region
                                                   >> 789                  * of execution, cleaning up after the synchronize_srcu()
                                                   >> 790                  * above.
                                                   >> 791                  */
                                                   >> 792                 synchronize_sched();
                                                   >> 793 
                                                   >> 794                 /* Invoke the callbacks. */
                                                   >> 795                 while (list) {
                                                   >> 796                         next = list->next;
                                                   >> 797                         local_bh_disable();
                                                   >> 798                         list->func(list);
                                                   >> 799                         local_bh_enable();
                                                   >> 800                         list = next;
                                                   >> 801                         cond_resched();
                                                   >> 802                 }
                                                   >> 803                 schedule_timeout_uninterruptible(HZ/10);
                                                   >> 804         }
                                                   >> 805 }
                                                   >> 806 
                                                   >> 807 /* Spawn rcu_tasks_kthread() at core_initcall() time. */
                                                   >> 808 static int __init rcu_spawn_tasks_kthread(void)
                                                   >> 809 {
                                                   >> 810         struct task_struct *t;
                                                   >> 811 
                                                   >> 812         t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
                                                   >> 813         BUG_ON(IS_ERR(t));
                                                   >> 814         smp_mb(); /* Ensure others see full kthread. */
                                                   >> 815         WRITE_ONCE(rcu_tasks_kthread_ptr, t);
                                                   >> 816         return 0;
                                                   >> 817 }
                                                   >> 818 core_initcall(rcu_spawn_tasks_kthread);
                                                   >> 819 
                                                   >> 820 /* Do the srcu_read_lock() for the above synchronize_srcu().  */
                                                   >> 821 void exit_tasks_rcu_start(void)
                                                   >> 822 {
                                                   >> 823         preempt_disable();
                                                   >> 824         current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
                                                   >> 825         preempt_enable();
                                                   >> 826 }
                                                   >> 827 
                                                   >> 828 /* Do the srcu_read_unlock() for the above synchronize_srcu().  */
                                                   >> 829 void exit_tasks_rcu_finish(void)
                                                   >> 830 {
                                                   >> 831         preempt_disable();
                                                   >> 832         __srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
                                                   >> 833         preempt_enable();
                                                   >> 834 }
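/*
 * Illustrative sketch (not part of this file): the task-exit path is
 * expected to bracket its final stretch with these two hooks so that
 * the synchronize_srcu(&tasks_rcu_exit_srcu) in rcu_tasks_kthread()
 * waits for exiting tasks.  A caller along the lines of do_exit()
 * would, roughly:
 */
static void hypothetical_exit_path(void)
{
	exit_tasks_rcu_start();		/* enter the SRCU read-side section */
	/* ... tear down the task, possibly blocking voluntarily ... */
	exit_tasks_rcu_finish();	/* leave it just before disappearing */
}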
                                                   >> 835 
                                                   >> 836 #endif /* #ifdef CONFIG_TASKS_RCU */
                                                   >> 837 
                                                   >> 838 #ifndef CONFIG_TINY_RCU
                                                   >> 839 
                                                   >> 840 /*
                                                   >> 841  * Print any non-default Tasks RCU settings.
575  */                                               842  */
576 unsigned long get_completed_synchronize_rcu(vo !! 843 static void __init rcu_tasks_bootup_oddness(void)
577 {                                                 844 {
578         return RCU_GET_STATE_COMPLETED;        !! 845 #ifdef CONFIG_TASKS_RCU
                                                   >> 846         if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
                                                   >> 847                 pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
                                                   >> 848         else
                                                   >> 849                 pr_info("\tTasks RCU enabled.\n");
                                                   >> 850 #endif /* #ifdef CONFIG_TASKS_RCU */
579 }                                                 851 }
580 EXPORT_SYMBOL_GPL(get_completed_synchronize_rc !! 852 
                                                   >> 853 #endif /* #ifndef CONFIG_TINY_RCU */
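/*
 * Illustrative sketch (not part of this file): a pre-completed cookie
 * lets a newly initialized object look as if a grace period has
 * already elapsed since it was last exposed to readers, so the first
 * poll_state_synchronize_rcu() check succeeds without waiting.  The
 * struct and function names below are hypothetical.
 */
struct cached_obj {
	unsigned long gp_cookie;
	/* ... payload ... */
};

static void cached_obj_init(struct cached_obj *cop)
{
	cop->gp_cookie = get_completed_synchronize_rcu();
}

static bool cached_obj_gp_done(struct cached_obj *cop)
{
	/* True immediately after init, or once a later grace period ends. */
	return poll_state_synchronize_rcu(cop->gp_cookie);
}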
581                                                   854 
582 #ifdef CONFIG_PROVE_RCU                           855 #ifdef CONFIG_PROVE_RCU
583                                                   856 
584 /*                                                857 /*
585  * Early boot self test parameters.            !! 858  * Early boot self test parameters, one for each flavor
586  */                                               859  */
587 static bool rcu_self_test;                        860 static bool rcu_self_test;
                                                   >> 861 static bool rcu_self_test_bh;
                                                   >> 862 static bool rcu_self_test_sched;
                                                   >> 863 
588 module_param(rcu_self_test, bool, 0444);          864 module_param(rcu_self_test, bool, 0444);
                                                   >> 865 module_param(rcu_self_test_bh, bool, 0444);
                                                   >> 866 module_param(rcu_self_test_sched, bool, 0444);
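/*
 * Illustrative note (not part of this file): assuming the "rcupdate."
 * module-parameter prefix that this file is normally built with, the
 * early boot self tests can be requested on the kernel command line,
 * for example:
 *
 *	rcupdate.rcu_self_test=1 rcupdate.rcu_self_test_bh=1
 *
 * Each enabled flavor queues one callback at early boot, and the
 * late_initcall() below checks that every queued callback ran.
 */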
589                                                   867 
590 static int rcu_self_test_counter;                 868 static int rcu_self_test_counter;
591                                                   869 
592 static void test_callback(struct rcu_head *r)     870 static void test_callback(struct rcu_head *r)
593 {                                                 871 {
594         rcu_self_test_counter++;                  872         rcu_self_test_counter++;
595         pr_info("RCU test callback executed %d    873         pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
596 }                                                 874 }
597                                                   875 
598 DEFINE_STATIC_SRCU(early_srcu);                << 
599 static unsigned long early_srcu_cookie;        << 
600                                                << 
601 struct early_boot_kfree_rcu {                  << 
602         struct rcu_head rh;                    << 
603 };                                             << 
604                                                << 
605 static void early_boot_test_call_rcu(void)        876 static void early_boot_test_call_rcu(void)
606 {                                                 877 {
607         static struct rcu_head head;              878         static struct rcu_head head;
608         int idx;                               << 
609         static struct rcu_head shead;          << 
610         struct early_boot_kfree_rcu *rhp;      << 
611                                                   879 
612         idx = srcu_down_read(&early_srcu);     << 
613         srcu_up_read(&early_srcu, idx);        << 
614         call_rcu(&head, test_callback);           880         call_rcu(&head, test_callback);
615         early_srcu_cookie = start_poll_synchro !! 881 }
616         call_srcu(&early_srcu, &shead, test_ca !! 882 
617         rhp = kmalloc(sizeof(*rhp), GFP_KERNEL !! 883 static void early_boot_test_call_rcu_bh(void)
618         if (!WARN_ON_ONCE(!rhp))               !! 884 {
619                 kfree_rcu(rhp, rh);            !! 885         static struct rcu_head head;
                                                   >> 886 
                                                   >> 887         call_rcu_bh(&head, test_callback);
                                                   >> 888 }
                                                   >> 889 
                                                   >> 890 static void early_boot_test_call_rcu_sched(void)
                                                   >> 891 {
                                                   >> 892         static struct rcu_head head;
                                                   >> 893 
                                                   >> 894         call_rcu_sched(&head, test_callback);
620 }                                                 895 }
621                                                   896 
622 void rcu_early_boot_tests(void)                   897 void rcu_early_boot_tests(void)
623 {                                                 898 {
624         pr_info("Running RCU self tests\n");      899         pr_info("Running RCU self tests\n");
625                                                   900 
626         if (rcu_self_test)                        901         if (rcu_self_test)
627                 early_boot_test_call_rcu();       902                 early_boot_test_call_rcu();
                                                   >> 903         if (rcu_self_test_bh)
                                                   >> 904                 early_boot_test_call_rcu_bh();
                                                   >> 905         if (rcu_self_test_sched)
                                                   >> 906                 early_boot_test_call_rcu_sched();
628         rcu_test_sync_prims();                    907         rcu_test_sync_prims();
629 }                                                 908 }
630                                                   909 
631 static int rcu_verify_early_boot_tests(void)      910 static int rcu_verify_early_boot_tests(void)
632 {                                                 911 {
633         int ret = 0;                              912         int ret = 0;
634         int early_boot_test_counter = 0;          913         int early_boot_test_counter = 0;
635                                                   914 
636         if (rcu_self_test) {                      915         if (rcu_self_test) {
637                 early_boot_test_counter++;        916                 early_boot_test_counter++;
638                 rcu_barrier();                    917                 rcu_barrier();
                                                   >> 918         }
                                                   >> 919         if (rcu_self_test_bh) {
639                 early_boot_test_counter++;        920                 early_boot_test_counter++;
640                 srcu_barrier(&early_srcu);     !! 921                 rcu_barrier_bh();
641                 WARN_ON_ONCE(!poll_state_synch << 
642                 cleanup_srcu_struct(&early_src << 
643         }                                         922         }
                                                   >> 923         if (rcu_self_test_sched) {
                                                   >> 924                 early_boot_test_counter++;
                                                   >> 925                 rcu_barrier_sched();
                                                   >> 926         }
                                                   >> 927 
644         if (rcu_self_test_counter != early_boo    928         if (rcu_self_test_counter != early_boot_test_counter) {
645                 WARN_ON(1);                       929                 WARN_ON(1);
646                 ret = -1;                         930                 ret = -1;
647         }                                         931         }
648                                                   932 
649         return ret;                               933         return ret;
650 }                                                 934 }
651 late_initcall(rcu_verify_early_boot_tests);       935 late_initcall(rcu_verify_early_boot_tests);
652 #else                                             936 #else
653 void rcu_early_boot_tests(void) {}                937 void rcu_early_boot_tests(void) {}
654 #endif /* CONFIG_PROVE_RCU */                     938 #endif /* CONFIG_PROVE_RCU */
655                                                << 
656 #include "tasks.h"                             << 
657                                                   939 
658 #ifndef CONFIG_TINY_RCU                           940 #ifndef CONFIG_TINY_RCU
659                                                   941 
660 /*                                                942 /*
661  * Print any significant non-default boot-time    943  * Print any significant non-default boot-time settings.
662  */                                               944  */
663 void __init rcupdate_announce_bootup_oddness(v    945 void __init rcupdate_announce_bootup_oddness(void)
664 {                                                 946 {
665         if (rcu_normal)                           947         if (rcu_normal)
666                 pr_info("\tNo expedited grace     948                 pr_info("\tNo expedited grace period (rcu_normal).\n");
667         else if (rcu_normal_after_boot)           949         else if (rcu_normal_after_boot)
668                 pr_info("\tNo expedited grace     950                 pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
669         else if (rcu_expedited)                   951         else if (rcu_expedited)
670                 pr_info("\tAll grace periods a    952                 pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
671         if (rcu_cpu_stall_suppress)               953         if (rcu_cpu_stall_suppress)
672                 pr_info("\tRCU CPU stall warni    954                 pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
673         if (rcu_cpu_stall_timeout != CONFIG_RC    955         if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
674                 pr_info("\tRCU CPU stall warni    956                 pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
675         rcu_tasks_bootup_oddness();               957         rcu_tasks_bootup_oddness();
676 }                                                 958 }
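/*
 * Illustrative note (not part of this file): the settings reported
 * above are module parameters, so assuming the "rcupdate." prefix they
 * can be changed on the kernel command line, for example:
 *
 *	rcupdate.rcu_normal=1    (or rcupdate.rcu_expedited=1)
 *
 * which would then show up in this boot-time report.
 */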
677                                                   959 
678 #endif /* #ifndef CONFIG_TINY_RCU */              960 #endif /* #ifndef CONFIG_TINY_RCU */
679                                                   961 
