
TOMOYO Linux Cross Reference
Linux/kernel/context_tracking.c


Diff markup

Differences between /kernel/context_tracking.c (Version linux-6.11.5) and /kernel/context_tracking.c (Version linux-5.17.15)


  1 // SPDX-License-Identifier: GPL-2.0-only            1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*                                                  2 /*
  3  * Context tracking: Probe on high level context boundaries such as kernel,  !!   3  * Context tracking: Probe on high level context boundaries such as kernel
  4  * userspace, guest or idle.                                                  !!   4  * and userspace. This includes syscalls and exceptions entry/exit.
  5  *                                                  5  *
  6  * This is used by RCU to remove its dependency on the timer tick while a CPU      6  * This is used by RCU to remove its dependency on the timer tick while a CPU
  7  * runs in idle, userspace or guest mode.                                     !!   7  * runs in userspace.
  8  *                                                  8  *
  9  * User/guest tracking started by Frederic Weisbecker:                        !!   9  *  Started by Frederic Weisbecker:
 10  *                                                 10  *
 11  * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker                      !!  11  * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 12  *                                                 12  *
 13  * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,     13  * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 14  * Steven Rostedt, Peter Zijlstra for suggestions and improvements.                14  * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 15  *                                                 15  *
 16  * RCU extended quiescent state bits imported from kernel/rcu/tree.c          <<
 17  * where the relevant authorship may be found.                                <<
 18  */                                                16  */
 19                                                    17 
 20 #include <linux/context_tracking.h>                18 #include <linux/context_tracking.h>
 21 #include <linux/rcupdate.h>                        19 #include <linux/rcupdate.h>
 22 #include <linux/sched.h>                           20 #include <linux/sched.h>
 23 #include <linux/hardirq.h>                         21 #include <linux/hardirq.h>
 24 #include <linux/export.h>                          22 #include <linux/export.h>
 25 #include <linux/kprobes.h>                         23 #include <linux/kprobes.h>
 26 #include <trace/events/rcu.h>                                                 <<
 27                                                                               <<
 28                                                                               <<
 29 DEFINE_PER_CPU(struct context_tracking, context_tracking) = {                 <<
 30 #ifdef CONFIG_CONTEXT_TRACKING_IDLE                                           <<
 31         .dynticks_nesting = 1,                                                <<
 32         .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,                          <<
 33 #endif                                                                        <<
 34         .state = ATOMIC_INIT(RCU_DYNTICKS_IDX),                               <<
 35 };                                                                            <<
 36 EXPORT_SYMBOL_GPL(context_tracking);                                          <<
 37                                                                               <<
 38 #ifdef CONFIG_CONTEXT_TRACKING_IDLE                                           <<
 39 #define TPS(x)  tracepoint_string(x)                                          <<
 40                                                                               <<
 41 /* Record the current task on dyntick-idle entry. */                          <<
 42 static __always_inline void rcu_dynticks_task_enter(void)                     <<
 43 {                                                                             <<
 44 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)                   <<
 45         WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());          <<
 46 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */      <<
 47 }                                                                             <<
 48                                                                               <<
 49 /* Record no current task on dyntick-idle exit. */                            <<
 50 static __always_inline void rcu_dynticks_task_exit(void)                      <<
 51 {                                                                             <<
 52 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)                   <<
 53         WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);                          <<
 54 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */      <<
 55 }                                                                             <<
 56                                                                               <<
 57 /* Turn on heavyweight RCU tasks trace readers on idle/user entry. */         <<
 58 static __always_inline void rcu_dynticks_task_trace_enter(void)               <<
 59 {                                                                             <<
 60 #ifdef CONFIG_TASKS_TRACE_RCU                                                 <<
 61         if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))                       <<
 62                 current->trc_reader_special.b.need_mb = true;                 <<
 63 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */                                    <<
 64 }                                                                             <<
 65                                                                               <<
 66 /* Turn off heavyweight RCU tasks trace readers on idle/user exit. */         <<
 67 static __always_inline void rcu_dynticks_task_trace_exit(void)                <<
 68 {                                                                             <<
 69 #ifdef CONFIG_TASKS_TRACE_RCU                                                 <<
 70         if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))                       <<
 71                 current->trc_reader_special.b.need_mb = false;                <<
 72 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */                                    <<
 73 }                                                                             <<
 74                                                                               <<
 75 /*                                                                            <<
 76  * Record entry into an extended quiescent state.  This is only to be        <<
 77  * called when not already in an extended quiescent state, that is,          <<
 78  * RCU is watching prior to the call to this function and is no longer       <<
 79  * watching upon return.                                                     <<
 80  */                                                                          <<
 81 static noinstr void ct_kernel_exit_state(int offset)                         <<
 82 {                                                                            <<
 83         int seq;                                                             <<
 84                                                                              <<
 85         /*                                                                   <<
 86          * CPUs seeing atomic_add_return() must see prior RCU read-side      <<
 87          * critical sections, and we also must force ordering with the       <<
 88          * next idle sojourn.                                                <<
 89          */                                                                  <<
 90         rcu_dynticks_task_trace_enter();  // Before ->dynticks update!       <<
 91         seq = ct_state_inc(offset);                                          <<
 92         // RCU is no longer watching.  Better be in extended quiescent state! <<
 93         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & RCU_DYNTICKS_IDX)); <<
 94 }                                                                            <<
 95                                                                              <<
 96 /*                                                                           <<
 97  * Record exit from an extended quiescent state.  This is only to be         <<
 98  * called from an extended quiescent state, that is, RCU is not watching     <<
 99  * prior to the call to this function and is watching upon return.           <<
100  */                                                                          <<
101 static noinstr void ct_kernel_enter_state(int offset)                        <<
102 {                                                                            <<
103         int seq;                                                             <<
104                                                                              <<
105         /*                                                                   <<
106          * CPUs seeing atomic_add_return() must see prior idle sojourns,     <<
107          * and we also must force ordering with the next RCU read-side       <<
108          * critical section.                                                 <<
109          */                                                                  <<
110         seq = ct_state_inc(offset);                                          <<
111         // RCU is now watching.  Better not be in an extended quiescent state! <<
112         rcu_dynticks_task_trace_exit();  // After ->dynticks update!         <<
113         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & RCU_DYNTICKS_IDX)); <<
114 }                                                                            <<
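
The two helpers above are the heart of the extended-quiescent-state (EQS) accounting: ct->state is one per-CPU atomic whose low bits hold the CONTEXT_* value, whose RCU_DYNTICKS_IDX bit says whether RCU is watching, and which is bumped by RCU_DYNTICKS_IDX on every EQS entry or exit. A minimal user-space sketch of that counter discipline, assuming C11 atomics and illustrative constants (nothing below is the kernel's code):

    /* User-space model of ct_state_inc(): low bits = CONTEXT_* state,
     * the RCU_DYNTICKS_IDX bit = "RCU is watching", +RCU_DYNTICKS_IDX
     * per EQS transition.  Values are illustrative only. */
    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    #define CONTEXT_MASK     3                    /* low bits: CONTEXT_* */
    #define RCU_DYNTICKS_IDX (CONTEXT_MASK + 1)   /* EQS increment unit  */

    static atomic_int state = RCU_DYNTICKS_IDX;   /* boot: RCU watching  */

    static int ct_state_inc(int incby)
    {
            return atomic_fetch_add(&state, incby) + incby;
    }

    static bool rcu_watching(int seq)
    {
            return seq & RCU_DYNTICKS_IDX;
    }

    int main(void)
    {
            int snap = atomic_load(&state);

            assert(rcu_watching(snap));                             /* in kernel */
            assert(!rcu_watching(ct_state_inc(RCU_DYNTICKS_IDX)));  /* EQS entry */
            assert(rcu_watching(ct_state_inc(RCU_DYNTICKS_IDX)));   /* EQS exit  */

            /* A remote snapshot comparison proves an EQS was passed. */
            assert(atomic_load(&state) - snap == 2 * RCU_DYNTICKS_IDX);
            return 0;
    }

A remote CPU can therefore sample the word twice and learn both whether this CPU is currently in an EQS (bit clear) and whether it passed through one in between (counter moved).
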
115                                                                              <<
116 /*                                                                           <<
117  * Enter an RCU extended quiescent state, which can be either the            <<
118  * idle loop or adaptive-tickless usermode execution.                        <<
119  *                                                                           <<
120  * We crowbar the ->dynticks_nmi_nesting field to zero to allow for          <<
121  * the possibility of usermode upcalls having messed up our count            <<
122  * of interrupt nesting level during the prior busy period.                  <<
123  */                                                                          <<
124 static void noinstr ct_kernel_exit(bool user, int offset)                    <<
125 {                                                                            <<
126         struct context_tracking *ct = this_cpu_ptr(&context_tracking);      <<
127                                                                              <<
128         WARN_ON_ONCE(ct_dynticks_nmi_nesting() != DYNTICK_IRQ_NONIDLE);     <<
129         WRITE_ONCE(ct->dynticks_nmi_nesting, 0);                            <<
130         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&                    <<
131                      ct_dynticks_nesting() == 0);                           <<
132         if (ct_dynticks_nesting() != 1) {                                   <<
133                 // RCU will still be watching, so just do accounting and leave. <<
134                 ct->dynticks_nesting--;                                     <<
135                 return;                                                     <<
136         }                                                                   <<
137                                                                              <<
138         instrumentation_begin();                                            <<
139         lockdep_assert_irqs_disabled();                                     <<
140         trace_rcu_dyntick(TPS("Start"), ct_dynticks_nesting(), 0, ct_dynticks()); <<
141         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); <<
142         rcu_preempt_deferred_qs(current);                                   <<
143                                                                              <<
144         // instrumentation for the noinstr ct_kernel_exit_state()           <<
145         instrument_atomic_write(&ct->state, sizeof(ct->state));             <<
146                                                                              <<
147         instrumentation_end();                                              <<
148         WRITE_ONCE(ct->dynticks_nesting, 0); /* Avoid irq-access tearing. */ <<
149         // RCU is watching here ...                                         <<
150         ct_kernel_exit_state(offset);                                       <<
151         // ... but is no longer watching here.                              <<
152         rcu_dynticks_task_enter();                                          <<
153 }                                                                            <<
154                                                                              <<
155 /*                                                                           <<
156  * Exit an RCU extended quiescent state, which can be either the             <<
157  * idle loop or adaptive-tickless usermode execution.                        <<
158  *                                                                           <<
159  * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to     <<
160  * allow for the possibility of usermode upcalls messing up our count of     <<
161  * interrupt nesting level during the busy period.                           <<
162  */                                                                          <<
163 static void noinstr ct_kernel_enter(bool user, int offset)                   <<
164 {                                                                            <<
165         struct context_tracking *ct = this_cpu_ptr(&context_tracking);      <<
166         long oldval;                                                        <<
167                                                                              <<
168         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled()); <<
169         oldval = ct_dynticks_nesting();                                     <<
170         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);       <<
171         if (oldval) {                                                       <<
172                 // RCU was already watching, so just do accounting and leave. <<
173                 ct->dynticks_nesting++;                                     <<
174                 return;                                                     <<
175         }                                                                   <<
176         rcu_dynticks_task_exit();                                           <<
177         // RCU is not watching here ...                                     <<
178         ct_kernel_enter_state(offset);                                      <<
179         // ... but is watching here.                                        <<
180         instrumentation_begin();                                            <<
181                                                                              <<
182         // instrumentation for the noinstr ct_kernel_enter_state()          <<
183         instrument_atomic_write(&ct->state, sizeof(ct->state));             <<
184                                                                              <<
185         trace_rcu_dyntick(TPS("End"), ct_dynticks_nesting(), 1, ct_dynticks()); <<
186         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); <<
187         WRITE_ONCE(ct->dynticks_nesting, 1);                                <<
188         WARN_ON_ONCE(ct_dynticks_nmi_nesting());                            <<
189         WRITE_ONCE(ct->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);          <<
190         instrumentation_end();                                              <<
191 }                                                                            <<
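
ct_kernel_exit()/ct_kernel_enter() layer a process-level nesting count on top of the EQS helpers: only the outermost 1 -> 0 exit and the matching 0 -> 1 re-entry toggle the EQS state, while ->dynticks_nmi_nesting is crowbarred to a known value because usermode upcalls may have left it unbalanced. A small self-checking user-space model of that nesting rule, with invented names:

    /* Model of ->dynticks_nesting: only the outermost kernel exit
     * (1 -> 0) and re-entry (0 -> 1) toggle the EQS; inner levels count. */
    #include <assert.h>
    #include <stdbool.h>

    static long nesting = 1;        /* boot: in kernel, depth 1 */
    static bool watching = true;

    static void kernel_exit_sketch(void)        /* cf. ct_kernel_exit() */
    {
            if (nesting != 1) {
                    nesting--;                  /* RCU stays watching */
                    return;
            }
            nesting = 0;
            watching = false;                   /* cf. ct_kernel_exit_state() */
    }

    static void kernel_enter_sketch(void)       /* cf. ct_kernel_enter() */
    {
            if (nesting) {
                    nesting++;                  /* already watching */
                    return;
            }
            watching = true;                    /* cf. ct_kernel_enter_state() */
            nesting = 1;
    }

    int main(void)
    {
            kernel_exit_sketch();               /* enter idle/user EQS  */
            assert(nesting == 0 && !watching);
            kernel_enter_sketch();              /* back into the kernel */
            assert(nesting == 1 && watching);
            return 0;
    }
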
192                                                                              <<
193 /**                                                                          <<
194  * ct_nmi_exit - inform RCU of exit from NMI context                         <<
195  *                                                                           <<
196  * If we are returning from the outermost NMI handler that interrupted an    <<
197  * RCU-idle period, update ct->state and ct->dynticks_nmi_nesting            <<
198  * to let the RCU grace-period handling know that the CPU is back to         <<
199  * being RCU-idle.                                                           <<
200  *                                                                           <<
201  * If you add or remove a call to ct_nmi_exit(), be sure to test             <<
202  * with CONFIG_RCU_EQS_DEBUG=y.                                              <<
203  */                                                                          <<
204 void noinstr ct_nmi_exit(void)                                               <<
205 {                                                                            <<
206         struct context_tracking *ct = this_cpu_ptr(&context_tracking);      <<
207                                                                              <<
208         instrumentation_begin();                                            <<
209         /*                                                                  <<
210          * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.   <<
211          * (We are exiting an NMI handler, so RCU better be paying attention <<
212          * to us!)                                                          <<
213          */                                                                 <<
214         WARN_ON_ONCE(ct_dynticks_nmi_nesting() <= 0);                       <<
215         WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());                       <<
216                                                                              <<
217         /*                                                                  <<
218          * If the nesting level is not 1, the CPU wasn't RCU-idle, so       <<
219          * leave it in non-RCU-idle state.                                  <<
220          */                                                                 <<
221         if (ct_dynticks_nmi_nesting() != 1) {                               <<
222                 trace_rcu_dyntick(TPS("--="), ct_dynticks_nmi_nesting(), ct_dynticks_nmi_nesting() - 2, <<
223                                   ct_dynticks());                           <<
224                 WRITE_ONCE(ct->dynticks_nmi_nesting, /* No store tearing. */ <<
225                            ct_dynticks_nmi_nesting() - 2);                  <<
226                 instrumentation_end();                                      <<
227                 return;                                                     <<
228         }                                                                   <<
229                                                                              <<
230         /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */   <<
231         trace_rcu_dyntick(TPS("Startirq"), ct_dynticks_nmi_nesting(), 0, ct_dynticks()); <<
232         WRITE_ONCE(ct->dynticks_nmi_nesting, 0); /* Avoid store tearing. */ <<
233                                                                              <<
234         // instrumentation for the noinstr ct_kernel_exit_state()           <<
235         instrument_atomic_write(&ct->state, sizeof(ct->state));             <<
236         instrumentation_end();                                              <<
237                                                                              <<
238         // RCU is watching here ...                                         <<
239         ct_kernel_exit_state(RCU_DYNTICKS_IDX);                             <<
240         // ... but is no longer watching here.                              <<
241                                                                              <<
242         if (!in_nmi())                                                      <<
243                 rcu_dynticks_task_enter();                                  <<
244 }                                                                            <<
245                                                                              <<
246 /**                                                                          <<
247  * ct_nmi_enter - inform RCU of entry to NMI context                         <<
248  *                                                                           <<
249  * If the CPU was idle from RCU's viewpoint, update ct->state and            <<
250  * ct->dynticks_nmi_nesting to let the RCU grace-period handling know        <<
251  * that the CPU is active.  This implementation permits nested NMIs, as      <<
252  * long as the nesting level does not overflow an int.  (You will probably   <<
253  * run out of stack space first.)                                            <<
254  *                                                                           <<
255  * If you add or remove a call to ct_nmi_enter(), be sure to test            <<
256  * with CONFIG_RCU_EQS_DEBUG=y.                                              <<
257  */                                                                          <<
258 void noinstr ct_nmi_enter(void)                                              <<
259 {                                                                            <<
260         long incby = 2;                                                     <<
261         struct context_tracking *ct = this_cpu_ptr(&context_tracking);      <<
262                                                                              <<
263         /* Complain about underflow. */                                     <<
264         WARN_ON_ONCE(ct_dynticks_nmi_nesting() < 0);                        <<
265                                                                              <<
266         /*                                                                  <<
267          * If idle from RCU viewpoint, atomically increment ->dynticks      <<
268          * to mark non-idle and increment ->dynticks_nmi_nesting by one.    <<
269          * Otherwise, increment ->dynticks_nmi_nesting by two.  This means  <<
270          * if ->dynticks_nmi_nesting is equal to one, we are guaranteed     <<
271          * to be in the outermost NMI handler that interrupted an RCU-idle  <<
272          * period (observation due to Andy Lutomirski).                     <<
273          */                                                                 <<
274         if (rcu_dynticks_curr_cpu_in_eqs()) {                               <<
275                                                                              <<
276                 if (!in_nmi())                                              <<
277                         rcu_dynticks_task_exit();                           <<
278                                                                              <<
279                 // RCU is not watching here ...                             <<
280                 ct_kernel_enter_state(RCU_DYNTICKS_IDX);                    <<
281                 // ... but is watching here.                                <<
282                                                                              <<
283                 instrumentation_begin();                                    <<
284                 // instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs() <<
285                 instrument_atomic_read(&ct->state, sizeof(ct->state));      <<
286                 // instrumentation for the noinstr ct_kernel_enter_state()  <<
287                 instrument_atomic_write(&ct->state, sizeof(ct->state));     <<
288                                                                              <<
289                 incby = 1;                                                  <<
290         } else if (!in_nmi()) {                                             <<
291                 instrumentation_begin();                                    <<
292                 rcu_irq_enter_check_tick();                                 <<
293         } else  {                                                           <<
294                 instrumentation_begin();                                    <<
295         }                                                                   <<
296                                                                              <<
297         trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),          <<
298                           ct_dynticks_nmi_nesting(),                        <<
299                           ct_dynticks_nmi_nesting() + incby, ct_dynticks()); <<
300         instrumentation_end();                                              <<
301         WRITE_ONCE(ct->dynticks_nmi_nesting, /* Prevent store tearing. */   <<
302                    ct_dynticks_nmi_nesting() + incby);                      <<
303         barrier();                                                          <<
304 }                                                                            <<
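
The incby arithmetic above guarantees that ->dynticks_nmi_nesting == 1 identifies exactly the outermost handler that interrupted an RCU-idle CPU: entry from an EQS adds 1, every nested entry adds 2, and ct_nmi_exit() undoes that by subtracting 2 (the "--=" tracepoint case) or storing 0 at the outermost level. A self-checking user-space model of the bookkeeping, with invented names:

    /* Model of the NMI nesting arithmetic; pure user-space, no kernel code. */
    #include <assert.h>
    #include <stdbool.h>

    static long nmi_nesting;        /* ->dynticks_nmi_nesting */
    static bool in_eqs = true;      /* CPU starts RCU-idle    */

    static void nmi_enter_sketch(void)
    {
            long incby = 2;

            if (in_eqs) {           /* cf. rcu_dynticks_curr_cpu_in_eqs() */
                    in_eqs = false;
                    incby = 1;
            }
            nmi_nesting += incby;
    }

    static void nmi_exit_sketch(void)
    {
            if (nmi_nesting != 1) {
                    nmi_nesting -= 2;   /* the "--=" tracepoint case */
                    return;
            }
            nmi_nesting = 0;            /* outermost: back to RCU-idle */
            in_eqs = true;
    }

    int main(void)
    {
            nmi_enter_sketch();                      /* outermost, from idle */
            assert(nmi_nesting == 1);
            nmi_enter_sketch();                      /* nested NMI           */
            assert(nmi_nesting == 3);
            nmi_exit_sketch();
            assert(nmi_nesting == 1 && !in_eqs);
            nmi_exit_sketch();
            assert(nmi_nesting == 0 && in_eqs);
            return 0;
    }
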
305                                                                              <<
306 /**                                                                          <<
307  * ct_idle_enter - inform RCU that current CPU is entering idle              <<
308  *                                                                           <<
309  * Enter idle mode, in other words, -leave- the mode in which RCU            <<
310  * read-side critical sections can occur.  (Though RCU read-side             <<
311  * critical sections can occur in irq handlers in idle, a possibility        <<
312  * handled by irq_enter() and irq_exit().)                                   <<
313  *                                                                           <<
314  * If you add or remove a call to ct_idle_enter(), be sure to test with     <<
315  * CONFIG_RCU_EQS_DEBUG=y.                                                   <<
316  */                                                                          <<
317 void noinstr ct_idle_enter(void)                                             <<
318 {                                                                            <<
319         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled()); <<
320         ct_kernel_exit(false, RCU_DYNTICKS_IDX + CONTEXT_IDLE);             <<
321 }                                                                            <<
322 EXPORT_SYMBOL_GPL(ct_idle_enter);                                            <<
323                                                                              <<
324 /**                                                                          <<
325  * ct_idle_exit - inform RCU that current CPU is leaving idle                <<
326  *                                                                           <<
327  * Exit idle mode, in other words, -enter- the mode in which RCU             <<
328  * read-side critical sections can occur.                                    <<
329  *                                                                           <<
330  * If you add or remove a call to ct_idle_exit(), be sure to test with      <<
331  * CONFIG_RCU_EQS_DEBUG=y.                                                   <<
332  */                                                                          <<
333 void noinstr ct_idle_exit(void)                                              <<
334 {                                                                            <<
335         unsigned long flags;                                                <<
336                                                                              <<
337         raw_local_irq_save(flags);                                          <<
338         ct_kernel_enter(false, RCU_DYNTICKS_IDX - CONTEXT_IDLE);            <<
339         raw_local_irq_restore(flags);                                       <<
340 }                                                                            <<
341 EXPORT_SYMBOL_GPL(ct_idle_exit);                                             <<
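
ct_idle_enter() and ct_idle_exit() are meant to bracket the architecture's low-power wait with interrupts disabled. A hedged, kernel-style sketch of the call shape; arch_wait_for_interrupt() is an invented stand-in for the halt primitive, and the real call sites are reached from kernel/sched/idle.c and the cpuidle core:

    /* Kernel-style sketch of an idle-loop caller, interrupts off.
     * arch_wait_for_interrupt() is hypothetical; not the in-tree code. */
    static void do_idle_sketch(void)
    {
            ct_idle_enter();                /* leave the RCU-watched mode        */
            arch_wait_for_interrupt();      /* irqs taken here use ct_irq_enter() */
            ct_idle_exit();                 /* resume RCU watching               */
    }
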
342                                                                              <<
343 /**                                                                          <<
344  * ct_irq_enter - inform RCU that current CPU is entering irq away from idle <<
345  *                                                                           <<
346  * Enter an interrupt handler, which might possibly result in exiting        <<
347  * idle mode, in other words, entering the mode in which read-side critical  <<
348  * sections can occur.  The caller must have disabled interrupts.            <<
349  *                                                                           <<
350  * Note that the Linux kernel is fully capable of entering an interrupt      <<
351  * handler that it never exits, for example when doing upcalls to user mode! <<
352  * This code assumes that the idle loop never does upcalls to user mode.     <<
353  * If your architecture's idle loop does do upcalls to user mode (or does    <<
354  * anything else that results in unbalanced calls to the irq_enter() and     <<
355  * irq_exit() functions), RCU will give you what you deserve, good and hard. <<
356  * But very infrequently and irreproducibly.                                 <<
357  *                                                                           <<
358  * Use things like work queues to work around this limitation.               <<
359  *                                                                           <<
360  * You have been warned.                                                     <<
361  *                                                                           <<
362  * If you add or remove a call to ct_irq_enter(), be sure to test with      <<
363  * CONFIG_RCU_EQS_DEBUG=y.                                                   <<
364  */                                                                          <<
365 noinstr void ct_irq_enter(void)                                              <<
366 {                                                                            <<
367         lockdep_assert_irqs_disabled();                                     <<
368         ct_nmi_enter();                                                     <<
369 }                                                                            <<
370                                                                              <<
371 /**                                                                          <<
372  * ct_irq_exit - inform RCU that current CPU is exiting irq towards idle     <<
373  *                                                                           <<
374  * Exit from an interrupt handler, which might possibly result in entering   <<
375  * idle mode, in other words, leaving the mode in which read-side critical   <<
376  * sections can occur.  The caller must have disabled interrupts.            <<
377  *                                                                           <<
378  * This code assumes that the idle loop never does anything that might       <<
379  * result in unbalanced calls to irq_enter() and irq_exit().  If your        <<
380  * architecture's idle loop violates this assumption, RCU will give you what <<
381  * you deserve, good and hard.  But very infrequently and irreproducibly.    <<
382  *                                                                           <<
383  * Use things like work queues to work around this limitation.               <<
384  *                                                                           <<
385  * You have been warned.                                                     <<
386  *                                                                           <<
387  * If you add or remove a call to ct_irq_exit(), be sure to test with       <<
388  * CONFIG_RCU_EQS_DEBUG=y.                                                   <<
389  */                                                                          <<
390 noinstr void ct_irq_exit(void)                                               <<
391 {                                                                            <<
392         lockdep_assert_irqs_disabled();                                     <<
393         ct_nmi_exit();                                                      <<
394 }                                                                            <<
395                                                                              <<
396 /*                                                                           <<
397  * Wrapper for ct_irq_enter() where interrupts are enabled.                  <<
398  *                                                                           <<
399  * If you add or remove a call to ct_irq_enter_irqson(), be sure to test    <<
400  * with CONFIG_RCU_EQS_DEBUG=y.                                              <<
401  */                                                                          <<
402 void ct_irq_enter_irqson(void)                                               <<
403 {                                                                            <<
404         unsigned long flags;                                                <<
405                                                                              <<
406         local_irq_save(flags);                                              <<
407         ct_irq_enter();                                                     <<
408         local_irq_restore(flags);                                           <<
409 }                                                                            <<
410                                                                              <<
411 /*                                                                           <<
412  * Wrapper for ct_irq_exit() where interrupts are enabled.                   <<
413  *                                                                           <<
414  * If you add or remove a call to ct_irq_exit_irqson(), be sure to test     <<
415  * with CONFIG_RCU_EQS_DEBUG=y.                                              <<
416  */                                                                          <<
417 void ct_irq_exit_irqson(void)                                                <<
418 {                                                                            <<
419         unsigned long flags;                                                <<
420                                                                              <<
421         local_irq_save(flags);                                              <<
422         ct_irq_exit();                                                      <<
423         local_irq_restore(flags);                                           <<
424 }                                                                            <<
425 #else                                                                        <<
426 static __always_inline void ct_kernel_exit(bool user, int offset) { }        <<
427 static __always_inline void ct_kernel_enter(bool user, int offset) { }       <<
428 #endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */                             <<
429                                                                              <<
430 #ifdef CONFIG_CONTEXT_TRACKING_USER                                          <<
431                                                    24 
432 #define CREATE_TRACE_POINTS                        25 #define CREATE_TRACE_POINTS
433 #include <trace/events/context_tracking.h>         26 #include <trace/events/context_tracking.h>
434                                                    27 
435 DEFINE_STATIC_KEY_FALSE_RO(context_tracking_key);  !!  28 DEFINE_STATIC_KEY_FALSE(context_tracking_key);
436 EXPORT_SYMBOL_GPL(context_tracking_key);           29 EXPORT_SYMBOL_GPL(context_tracking_key);
437                                                    30 
                                                   >>  31 DEFINE_PER_CPU(struct context_tracking, context_tracking);
                                                   >>  32 EXPORT_SYMBOL_GPL(context_tracking);
                                                   >>  33 
438 static noinstr bool context_tracking_recursion_enter(void)      34 static noinstr bool context_tracking_recursion_enter(void)
439 {                                                                35 {
440         int recursion;                                           36         int recursion;
441                                                                  37 
442         recursion = __this_cpu_inc_return(context_tracking.recursion);      38         recursion = __this_cpu_inc_return(context_tracking.recursion);
443         if (recursion == 1)                                      39         if (recursion == 1)
444                 return true;                                     40                 return true;
445                                                                  41 
446         WARN_ONCE((recursion < 1), "Invalid context tracking recursion value %d\n", recursion);      42         WARN_ONCE((recursion < 1), "Invalid context tracking recursion value %d\n", recursion);
447         __this_cpu_dec(context_tracking.recursion);              43         __this_cpu_dec(context_tracking.recursion);
448                                                                  44 
449         return false;                                            45         return false;
450 }                                                                46 }
451                                                                  47 
452 static __always_inline void context_tracking_recursion_exit(void)       48 static __always_inline void context_tracking_recursion_exit(void)
453 {                                                                49 {
454         __this_cpu_dec(context_tracking.recursion);              50         __this_cpu_dec(context_tracking.recursion);
455 }                                                                51 }
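
The recursion guard above is a plain per-CPU counter: the first caller sees 1 and proceeds; reentrant calls (say, from an exception taken in the middle of context tracking) back off. A user-space analogue using a thread-local counter in place of __this_cpu_inc_return(), with invented names:

    /* User-space analogue of the per-CPU recursion guard above. */
    #include <assert.h>
    #include <stdbool.h>

    static _Thread_local int recursion;

    static bool recursion_enter(void)
    {
            if (++recursion == 1)
                    return true;
            --recursion;        /* re-entered: back off */
            return false;
    }

    static void recursion_exit(void)
    {
            --recursion;
    }

    int main(void)
    {
            assert(recursion_enter());      /* outer call wins           */
            assert(!recursion_enter());     /* reentrant call backs off  */
            recursion_exit();               /* outer caller unwinds      */
            assert(recursion == 0);
            return 0;
    }
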
456                                                    52 
457 /**                                                53 /**
458  * __ct_user_enter - Inform the context tracking that the CPU is going  !!  54  * context_tracking_enter - Inform the context tracking that the CPU is going
459  *                   to enter user or guest space mode.                 !!  55  *                          enter user or guest space mode.
460  *                                                                      <<
461  * @state: userspace context-tracking state to enter.                   <<
462  *                                                 56  *
463  * This function must be called right before w     57  * This function must be called right before we switch from the kernel
464  * to user or guest space, when it's guarantee     58  * to user or guest space, when it's guaranteed the remaining kernel
465  * instructions to execute won't use any RCU r     59  * instructions to execute won't use any RCU read side critical section
466  * because this function sets RCU in extended      60  * because this function sets RCU in extended quiescent state.
467  */                                                61  */
468 void noinstr __ct_user_enter(enum ctx_state state)  !!  62 void noinstr __context_tracking_enter(enum ctx_state state)
469 {                                                  63 {
470         struct context_tracking *ct = this_cpu_ptr(&context_tracking);      <<
471         lockdep_assert_irqs_disabled();                                     <<
472                                                                              <<
473         /* Kernel threads aren't supposed to go to userspace */          64         /* Kernel threads aren't supposed to go to userspace */
474         WARN_ON_ONCE(!current->mm);                                       65         WARN_ON_ONCE(!current->mm);
475                                                                           66 
476         if (!context_tracking_recursion_enter())                          67         if (!context_tracking_recursion_enter())
477                 return;                            68                 return;
478                                                    69 
479         if (__ct_state() != state) {           !!  70         if ( __this_cpu_read(context_tracking.state) != state) {
480                 if (ct->active) {              !!  71                 if (__this_cpu_read(context_tracking.active)) {
481                         /*                         72                         /*
482                          * At this stage, only low level arch entry code remains and      73                          * At this stage, only low level arch entry code remains and
483                          * then we'll run in userspace. We can assume there won't be      74                          * then we'll run in userspace. We can assume there won't be
484                          * any RCU read-side critical section until the next call to      75                          * any RCU read-side critical section until the next call to
485                          * user_exit() or ct_irq_enter(). Let's remove RCU's dependency  !!  76                          * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
486                          * on the tick.                                                   77                          * on the tick.
487                          */                                                               78                          */
488                         if (state == CONTEXT_USER) {                                      79                         if (state == CONTEXT_USER) {
489                                 instrumentation_begin();                                  80                                 instrumentation_begin();
490                                 trace_user_enter(0);                                      81                                 trace_user_enter(0);
491                                 vtime_user_enter(current);                                82                                 vtime_user_enter(current);
492                                 instrumentation_end();                                    83                                 instrumentation_end();
493                         }                          84                         }
494                         /*                                                  <<
495                          * Other than generic entry implementation, we may be past the last <<
496                          * rescheduling opportunity in the entry code. Trigger a self IPI  <<
497                          * that will fire and reschedule once we resume in user/guest mode. <<
498                          */                                                 <<
499                         rcu_irq_work_resched();                             <<
500                                                                              <<
501                         /*                                                  <<
502                          * Enter RCU idle mode right before resuming userspace.  No use of RCU <<
503                          * is permitted between this call and rcu_eqs_exit(). This way the <<
504                          * CPU doesn't need to maintain the tick for RCU maintenance purposes <<
505                          * when the CPU runs in userspace.                  <<
506                          */                                                 <<
507                         ct_kernel_exit(true, RCU_DYNTICKS_IDX + state);     <<
508                                                                              <<
509                         /*                                                  <<
510                          * Special case if we only track user <-> kernel transitions for tickless <<
511                          * cputime accounting but we don't support RCU extended quiescent state. <<
512                          * In this we case we don't care about any concurrency/ordering. <<
513                          */                                                 <<
514                         if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))      <<
515                                 raw_atomic_set(&ct->state, state);          <<
516                 } else {                                                    <<
517                         /*                                                  <<
518                          * Even if context tracking is disabled on this CPU, because it's outside <<
519                          * the full dynticks mask for example, we still have to keep track of the <<
520                          * context transitions and states to prevent inconsistency on those of <<
521                          * other CPUs.                                      <<
522                          * If a task triggers an exception in userspace, sleep on the exception <<
523                          * handler and then migrate to another CPU, that new CPU must know where <<
524                          * the exception returns by the time we call exception_exit(). <<
525                          * This information can only be provided by the previous CPU when it called <<
526                          * exception_enter().                               <<
527                          * OTOH we can spare the calls to vtime and RCU when context_tracking.active <<
528                          * is false because we know that CPU is not tickless. <<
529                          */                                                 <<
530                         if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {    <<
531                                 /* Tracking for vtime only, no concurrent RCU EQS accounting */ <<
532                                 raw_atomic_set(&ct->state, state);          <<
533                         } else {                                            <<
534                                 /*                                          <<
535                                  * Tracking for vtime and RCU EQS. Make sure we don't race <<
536                                  * with NMIs. OTOH we don't care about ordering here since <<
537                                  * RCU only requires RCU_DYNTICKS_IDX increments to be fully <<
538                                  * ordered.                                 <<
539                                  */                                         <<
540                                 raw_atomic_add(state, &ct->state);          <<
541                         }                                                   <<
542                 }                                  86                 }
                                                   >>  87                 /*
                                                   >>  88                  * Even if context tracking is disabled on this CPU, because it's outside
                                                   >>  89                  * the full dynticks mask for example, we still have to keep track of the
                                                   >>  90                  * context transitions and states to prevent inconsistency on those of
                                                   >>  91                  * other CPUs.
                                                   >>  92                  * If a task triggers an exception in userspace, sleep on the exception
                                                   >>  93                  * handler and then migrate to another CPU, that new CPU must know where
                                                   >>  94                  * the exception returns by the time we call exception_exit().
                                                   >>  95                  * This information can only be provided by the previous CPU when it called
                                                   >>  96                  * exception_enter().
                                                   >>  97                  * OTOH we can spare the calls to vtime and RCU when context_tracking.active
                                                   >>  98                  * is false because we know that CPU is not tickless.
                                                   >>  99                  */
                                                   >> 100                 __this_cpu_write(context_tracking.state, state);
543         }                                         101         }
544         context_tracking_recursion_exit();        102         context_tracking_recursion_exit();
545 }                                                 103 }
546 EXPORT_SYMBOL_GPL(__ct_user_enter);            !! 104 EXPORT_SYMBOL_GPL(__context_tracking_enter);
547                                                   105 
548 /*                                                                           !! 106 void context_tracking_enter(enum ctx_state state)
549  * OBSOLETE:                                                                 <<
550  * This function should be noinstr but the below local_irq_restore() is      <<
551  * unsafe because it involves illegal RCU uses through tracing and lockdep.  <<
552  * This is unlikely to be fixed as this function is obsolete. The preferred  <<
553  * way is to call __context_tracking_enter() through user_enter_irqoff()     <<
554  * or context_tracking_guest_enter(). It should be the arch entry code       <<
555  * responsibility to call into context tracking with IRQs disabled.          <<
556  */                                                                          <<
557 void ct_user_enter(enum ctx_state state)                                     <<
558 {                                                 107 {
559         unsigned long flags;                      108         unsigned long flags;
560                                                   109 
561         /*                                        110         /*
562          * Some contexts may involve an exception occuring in an irq,          111          * Some contexts may involve an exception occuring in an irq,
563          * leading to that nesting:                                            112          * leading to that nesting:
564          * ct_irq_enter() rcu_eqs_exit(true) rcu_eqs_exit(true) ct_irq_exit()  !! 113          * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
565          * This would mess up the dyntick_nesting count though. And rcu_irq_*()    114          * This would mess up the dyntick_nesting count though. And rcu_irq_*()
566          * helpers are enough to protect RCU uses inside the exception. So     115          * helpers are enough to protect RCU uses inside the exception. So
567          * just return immediately if we detect we are in an IRQ.              116          * just return immediately if we detect we are in an IRQ.
568          */                                       117          */
569         if (in_interrupt())                       118         if (in_interrupt())
570                 return;                           119                 return;
571                                                   120 
572         local_irq_save(flags);                    121         local_irq_save(flags);
573         __ct_user_enter(state);                !! 122         __context_tracking_enter(state);
574         local_irq_restore(flags);                 123         local_irq_restore(flags);
575 }                                                 124 }
576 NOKPROBE_SYMBOL(ct_user_enter);                !! 125 NOKPROBE_SYMBOL(context_tracking_enter);
577 EXPORT_SYMBOL_GPL(ct_user_enter);              !! 126 EXPORT_SYMBOL_GPL(context_tracking_enter);
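
__ct_user_enter() is reached with IRQs disabled from the architecture's exit-to-user path, normally through the user_enter_irqoff() wrapper, which tests the context_tracking_key static key first. A hedged sketch of the shape of such a caller; prepare_exit_to_user_sketch() is an invented stand-in for the arch/generic entry work loop:

    /* Kernel-style sketch of an exit-to-user caller.  user_enter_irqoff()
     * is the real wrapper (include/linux/context_tracking.h); the other
     * names are invented for illustration. */
    static void exit_to_user_sketch(void)
    {
            local_irq_disable();                /* IRQs must be off here       */
            prepare_exit_to_user_sketch();      /* last resched/signal work    */
            user_enter_irqoff();                /* -> __ct_user_enter(CONTEXT_USER) */
            /* low level register restore and return-to-user follows */
    }
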
578                                                   127 
579 /**                                                                            !! 128 void context_tracking_user_enter(void)
580  * user_enter_callable() - Unfortunate ASM callable version of user_enter() for <<
581  *                         archs that didn't manage to check the context tracking <<
582  *                         static key from low level code.                     <<
583  *                                                                             <<
584  * This OBSOLETE function should be noinstr but it unsafely calls              <<
585  * local_irq_restore(), involving illegal RCU uses through tracing and lockdep. <<
586  * This is unlikely to be fixed as this function is obsolete. The preferred    <<
587  * way is to call user_enter_irqoff(). It should be the arch entry code        <<
588  * responsibility to call into context tracking with IRQs disabled.            <<
589  */                                                                            <<
590 void user_enter_callable(void)                                                 <<
591 {                                                 129 {
592         user_enter();                             130         user_enter();
593 }                                                 131 }
594 NOKPROBE_SYMBOL(user_enter_callable);          !! 132 NOKPROBE_SYMBOL(context_tracking_user_enter);
595                                                   133 
596 /**                                               134 /**
597  * __ct_user_exit - Inform the context tracking that the CPU is          !! 135  * context_tracking_exit - Inform the context tracking that the CPU is
598  *                  exiting user or guest mode and entering the kernel.  !! 136  *                         exiting user or guest mode and entering the kernel.
599  *                                                                       <<
600  * @state: userspace context-tracking state being exited from.           <<
601  *                                                137  *
602  * This function must be called after we enter    138  * This function must be called after we entered the kernel from user or
603  * guest space before any use of RCU read side    139  * guest space before any use of RCU read side critical section. This
604  * potentially include any high level kernel c    140  * potentially include any high level kernel code like syscalls, exceptions,
605  * signal handling, etc...                        141  * signal handling, etc...
606  *                                                142  *
607  * This call supports re-entrancy. This way it    143  * This call supports re-entrancy. This way it can be called from any exception
608  * handler without needing to know if we came     144  * handler without needing to know if we came from userspace or not.
609  */                                               145  */
610 void noinstr __ct_user_exit(enum ctx_state state)  !! 146 void noinstr __context_tracking_exit(enum ctx_state state)
611 {                                                 147 {
612         struct context_tracking *ct = this_cpu_ptr(&context_tracking);      <<
613                                                                              <<
614         if (!context_tracking_recursion_enter())                           148         if (!context_tracking_recursion_enter())
615                 return;                           149                 return;
616                                                   150 
617         if (__ct_state() == state) {           !! 151         if (__this_cpu_read(context_tracking.state) == state) {
618                 if (ct->active) {              !! 152                 if (__this_cpu_read(context_tracking.active)) {
619                         /*                        153                         /*
620                          * Exit RCU idle mode while entering the kernel because it can  !! 154                          * We are going to run code that may use RCU. Inform
621                          * run a RCU read side critical section anytime.                !! 155                          * RCU core about that (ie: we may need the tick again).
622                          */                                                                156                          */
623                         ct_kernel_enter(true, RCU_DYNTICKS_IDX - state);                !! 157                         rcu_user_exit();
624                         if (state == CONTEXT_USER) {                                       158                         if (state == CONTEXT_USER) {
625                                 instrumentation_begin();                                   159                                 instrumentation_begin();
626                                 vtime_user_exit(current);                                  160                                 vtime_user_exit(current);
627                                 trace_user_exit(0);                                        161                                 trace_user_exit(0);
628                                 instrumentation_end();                                     162                                 instrumentation_end();
629                         }                         163                         }
630                                                                              <<
631                         /*                                                  <<
632                          * Special case if we only track user <-> kernel transitions for tickless <<
633                          * cputime accounting but we don't support RCU extended quiescent state. <<
634                          * In this we case we don't care about any concurrency/ordering. <<
635                          */                                                 <<
636                         if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))      <<
637                                 raw_atomic_set(&ct->state, CONTEXT_KERNEL); <<
638                                                                              <<
639                 } else {                                                    <<
640                         if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {    <<
641                                 /* Tracking for vtime only, no concurrent RCU EQS accounting */ <<
642                                 raw_atomic_set(&ct->state, CONTEXT_KERNEL); <<
643                         } else {                                            <<
644                                 /*                                          <<
645                                  * Tracking for vtime and RCU EQS. Make sure we don't race <<
646                                  * with NMIs. OTOH we don't care about ordering here since <<
647                                  * RCU only requires RCU_DYNTICKS_IDX increments to be fully <<
648                                  * ordered.                                 <<
649                                  */                                         <<
650                                 raw_atomic_sub(state, &ct->state);          <<
651                         }                                                   <<
652                 }                                 164                 }
                                                   >> 165                 __this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
653         }                                         166         }
654         context_tracking_recursion_exit();        167         context_tracking_recursion_exit();
655 }                                                 168 }
656 EXPORT_SYMBOL_GPL(__ct_user_exit);             !! 169 EXPORT_SYMBOL_GPL(__context_tracking_exit);
657                                                   170 
658 /*                                                                           !! 171 void context_tracking_exit(enum ctx_state state)
659  * OBSOLETE:                                                                 <<
660  * This function should be noinstr but the below local_irq_save() is         <<
661  * unsafe because it involves illegal RCU uses through tracing and lockdep.  <<
662  * This is unlikely to be fixed as this function is obsolete. The preferred  <<
663  * way is to call __context_tracking_exit() through user_exit_irqoff()       <<
664  * or context_tracking_guest_exit(). It should be the arch entry code        <<
665  * responsibility to call into context tracking with IRQs disabled.          <<
666  */                                                                          <<
667 void ct_user_exit(enum ctx_state state)                                      <<
668 {                                                 172 {
669         unsigned long flags;                      173         unsigned long flags;
670                                                   174 
671         if (in_interrupt())                       175         if (in_interrupt())
672                 return;                           176                 return;
673                                                   177 
674         local_irq_save(flags);                    178         local_irq_save(flags);
675         __ct_user_exit(state);                 !! 179         __context_tracking_exit(state);
676         local_irq_restore(flags);                 180         local_irq_restore(flags);
677 }                                                 181 }
678 NOKPROBE_SYMBOL(ct_user_exit);                 !! 182 NOKPROBE_SYMBOL(context_tracking_exit);
679 EXPORT_SYMBOL_GPL(ct_user_exit);               !! 183 EXPORT_SYMBOL_GPL(context_tracking_exit);
680                                                   184 
681 /**                                                                              !! 185 void context_tracking_user_exit(void)
682  * user_exit_callable() - Unfortunate ASM callable version of user_exit() for    <<
683  *                        archs that didn't manage to check the context tracking <<
684  *                        static key from low level code.                        <<
685  *                                                                               <<
686  * This OBSOLETE function should be noinstr but it unsafely calls local_irq_save(), <<
687  * involving illegal RCU uses through tracing and lockdep. This is unlikely      <<
688  * to be fixed as this function is obsolete. The preferred way is to call        <<
689  * user_exit_irqoff(). It should be the arch entry code responsibility to        <<
690  * call into context tracking with IRQs disabled.                                <<
691  */                                            << 
692 void user_exit_callable(void)                  << 
693 {                                                 186 {
694         user_exit();                              187         user_exit();
695 }                                                 188 }
696 NOKPROBE_SYMBOL(user_exit_callable);           !! 189 NOKPROBE_SYMBOL(context_tracking_user_exit);
697                                                   190 
698 void __init ct_cpu_track_user(int cpu)         !! 191 void __init context_tracking_cpu_set(int cpu)
699 {                                                 192 {
700         static __initdata bool initialized = f    193         static __initdata bool initialized = false;
701                                                   194 
702         if (!per_cpu(context_tracking.active, cpu)) {             195         if (!per_cpu(context_tracking.active, cpu)) {
703                 per_cpu(context_tracking.active, cpu) = true;     196                 per_cpu(context_tracking.active, cpu) = true;
704                 static_branch_inc(&context_tracking_key);         197                 static_branch_inc(&context_tracking_key);
705         }                                         198         }
706                                                   199 
707         if (initialized)                          200         if (initialized)
708                 return;                           201                 return;
709                                                   202 
710 #ifdef CONFIG_HAVE_TIF_NOHZ                       203 #ifdef CONFIG_HAVE_TIF_NOHZ
711         /*                                        204         /*
712          * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork     205          * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork
713          * This assumes that init is the only task at this early boot stage.         206          * This assumes that init is the only task at this early boot stage.
714          */                                                                          207          */
715         set_tsk_thread_flag(&init_task, TIF_NOHZ);                                   208         set_tsk_thread_flag(&init_task, TIF_NOHZ);
716 #endif                                            209 #endif
717         WARN_ON_ONCE(!tasklist_empty());          210         WARN_ON_ONCE(!tasklist_empty());
718                                                   211 
719         initialized = true;                       212         initialized = true;
720 }                                                 213 }
721                                                   214 
722 #ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE      !! 215 #ifdef CONFIG_CONTEXT_TRACKING_FORCE
723 void __init context_tracking_init(void)           216 void __init context_tracking_init(void)
724 {                                                 217 {
725         int cpu;                                  218         int cpu;
726                                                   219 
727         for_each_possible_cpu(cpu)                220         for_each_possible_cpu(cpu)
728                 ct_cpu_track_user(cpu);        !! 221                 context_tracking_cpu_set(cpu);
729 }                                                 222 }
730 #endif                                            223 #endif
731                                                << 
732 #endif /* #ifdef CONFIG_CONTEXT_TRACKING_USER */                             <<
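
Besides CONFIG_CONTEXT_TRACKING_USER_FORCE above, ct_cpu_track_user() is reached at boot for each CPU in the nohz_full= set, which is what flips ->active and the static key on tickless systems. A hedged sketch of that activation loop; tick_nohz_full_mask is real (kernel/time/tick-sched.c), but the loop below is illustrative rather than the literal in-tree code:

    /* Illustrative boot-time activation for nohz_full CPUs; the in-tree
     * loop lives in the tick/nohz setup code and may differ in detail. */
    static void __init nohz_full_activate_sketch(void)
    {
            int cpu;

            for_each_cpu(cpu, tick_nohz_full_mask)
                    ct_cpu_track_user(cpu);     /* ->active = true, static key++ */
    }
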
733                                                   224 
