// SPDX-License-Identifier: GPL-2.0-only
/*
 * Context tracking: Probe on high level context boundaries such as kernel,
 * userspace, guest or idle.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in idle, userspace or guest mode.
 *
 * User/guest tracking started by Frederic Weisbecker.
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 * RCU extended quiescent state bits imported from kernel/rcu/tree.c
 * where the relevant authorship may be found.
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/kprobes.h>
#include <trace/events/rcu.h>


DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
	.dynticks_nesting = 1,
	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
#endif
	.state = ATOMIC_INIT(RCU_DYNTICKS_IDX),
};
EXPORT_SYMBOL_GPL(context_tracking);

#ifdef CONFIG_CONTEXT_TRACKING_IDLE
#define TPS(x)	tracepoint_string(x)

/* Record the current task on dyntick-idle entry. */
static __always_inline void rcu_dynticks_task_enter(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
	WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}

/* Record no current task on dyntick-idle exit. */
static __always_inline void rcu_dynticks_task_exit(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
	WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}

/* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
static __always_inline void rcu_dynticks_task_trace_enter(void)
{
#ifdef CONFIG_TASKS_TRACE_RCU
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
		current->trc_reader_special.b.need_mb = true;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

/* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
static __always_inline void rcu_dynticks_task_trace_exit(void)
{
#ifdef CONFIG_TASKS_TRACE_RCU
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
		current->trc_reader_special.b.need_mb = false;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

/*
 * Record entry into an extended quiescent state.  This is only to be
 * called when not already in an extended quiescent state, that is,
 * RCU is watching prior to the call to this function and is no longer
 * watching upon return.
 */
static noinstr void ct_kernel_exit_state(int offset)
{
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior RCU read-side
	 * critical sections, and we also must force ordering with the
	 * next idle sojourn.
	 */
	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
	seq = ct_state_inc(offset);
	// RCU is no longer watching.  Better be in extended quiescent state!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & RCU_DYNTICKS_IDX));
}

/*
 * Record exit from an extended quiescent state.  This is only to be
 * called from an extended quiescent state, that is, RCU is not watching
 * prior to the call to this function and is watching upon return.
 */
static noinstr void ct_kernel_enter_state(int offset)
{
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
	 * and we also must force ordering with the next RCU read-side
	 * critical section.
	 */
	seq = ct_state_inc(offset);
	// RCU is now watching.  Better not be in an extended quiescent state!
	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & RCU_DYNTICKS_IDX));
}
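/*
 * Editor's sketch (not part of the original file): ct_state_inc() adds
 * @offset to ct->state, and each EQS entry/exit flips the RCU_DYNTICKS_IDX
 * bit of the resulting sequence, which is exactly what the two WARNs above
 * check.  A minimal "is RCU watching this CPU?" test along the lines of
 * rcu_dynticks_curr_cpu_in_eqs() therefore reduces to a single bit:
 */
#if 0	/* illustrative only */
static __always_inline bool sketch_cpu_in_eqs(void)
{
	/* RCU_DYNTICKS_IDX clear: in an EQS; set: RCU is watching. */
	return !(ct_dynticks() & RCU_DYNTICKS_IDX);
}
#endif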
/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
static void noinstr ct_kernel_exit(bool user, int offset)
{
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);

	WARN_ON_ONCE(ct_dynticks_nmi_nesting() != DYNTICK_IRQ_NONIDLE);
	WRITE_ONCE(ct->dynticks_nmi_nesting, 0);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     ct_dynticks_nesting() == 0);
	if (ct_dynticks_nesting() != 1) {
		// RCU will still be watching, so just do accounting and leave.
		ct->dynticks_nesting--;
		return;
	}

	instrumentation_begin();
	lockdep_assert_irqs_disabled();
	trace_rcu_dyntick(TPS("Start"), ct_dynticks_nesting(), 0, ct_dynticks());
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	rcu_preempt_deferred_qs(current);

	// instrumentation for the noinstr ct_kernel_exit_state()
	instrument_atomic_write(&ct->state, sizeof(ct->state));

	instrumentation_end();
	WRITE_ONCE(ct->dynticks_nesting, 0); /* Avoid irq-access tearing. */
	// RCU is watching here ...
	ct_kernel_exit_state(offset);
	// ... but is no longer watching here.
	rcu_dynticks_task_enter();
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
 */
static void noinstr ct_kernel_enter(bool user, int offset)
{
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);
	long oldval;

	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
	oldval = ct_dynticks_nesting();
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval) {
		// RCU was already watching, so just do accounting and leave.
		ct->dynticks_nesting++;
		return;
	}
	rcu_dynticks_task_exit();
	// RCU is not watching here ...
	ct_kernel_enter_state(offset);
	// ... but is watching here.
	instrumentation_begin();

	// instrumentation for the noinstr ct_kernel_enter_state()
	instrument_atomic_write(&ct->state, sizeof(ct->state));

	trace_rcu_dyntick(TPS("End"), ct_dynticks_nesting(), 1, ct_dynticks());
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	WRITE_ONCE(ct->dynticks_nesting, 1);
	WARN_ON_ONCE(ct_dynticks_nmi_nesting());
	WRITE_ONCE(ct->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
	instrumentation_end();
}
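/*
 * Editor's sketch (not part of the original file): only the outermost
 * ct_kernel_exit()/ct_kernel_enter() pair actually toggles the RCU state;
 * nested pairs merely adjust ->dynticks_nesting and return early.  A
 * balanced pair mirroring ct_idle_enter()/ct_idle_exit() below, starting
 * from the usual ->dynticks_nesting == 1:
 */
#if 0	/* illustrative only */
static void sketch_eqs_round_trip(void)
{
	ct_kernel_exit(false, RCU_DYNTICKS_IDX + CONTEXT_IDLE);	/* nesting 1 -> 0, enter EQS   */
	ct_kernel_enter(false, RCU_DYNTICKS_IDX - CONTEXT_IDLE);	/* nesting 0 -> 1, RCU watches */
}
#endif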
/**
 * ct_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update ct->state and ct->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to ct_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr ct_nmi_exit(void)
{
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);

	instrumentation_begin();
	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(ct_dynticks_nmi_nesting() <= 0);
	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (ct_dynticks_nmi_nesting() != 1) {
		trace_rcu_dyntick(TPS("--="), ct_dynticks_nmi_nesting(), ct_dynticks_nmi_nesting() - 2,
				  ct_dynticks());
		WRITE_ONCE(ct->dynticks_nmi_nesting, /* No store tearing. */
			   ct_dynticks_nmi_nesting() - 2);
		instrumentation_end();
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	trace_rcu_dyntick(TPS("Startirq"), ct_dynticks_nmi_nesting(), 0, ct_dynticks());
	WRITE_ONCE(ct->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

	// instrumentation for the noinstr ct_kernel_exit_state()
	instrument_atomic_write(&ct->state, sizeof(ct->state));
	instrumentation_end();

	// RCU is watching here ...
	ct_kernel_exit_state(RCU_DYNTICKS_IDX);
	// ... but is no longer watching here.

	if (!in_nmi())
		rcu_dynticks_task_enter();
}

/**
 * ct_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update ct->state and
 * ct->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 *
 * If you add or remove a call to ct_nmi_enter(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr ct_nmi_enter(void)
{
	long incby = 2;
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);

	/* Complain about underflow. */
	WARN_ON_ONCE(ct_dynticks_nmi_nesting() < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (rcu_dynticks_curr_cpu_in_eqs()) {

		if (!in_nmi())
			rcu_dynticks_task_exit();

		// RCU is not watching here ...
		ct_kernel_enter_state(RCU_DYNTICKS_IDX);
		// ... but is watching here.

		instrumentation_begin();
		// instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
		instrument_atomic_read(&ct->state, sizeof(ct->state));
		// instrumentation for the noinstr ct_kernel_enter_state()
		instrument_atomic_write(&ct->state, sizeof(ct->state));

		incby = 1;
	} else if (!in_nmi()) {
		instrumentation_begin();
		rcu_irq_enter_check_tick();
	} else {
		instrumentation_begin();
	}

	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
			  ct_dynticks_nmi_nesting(),
			  ct_dynticks_nmi_nesting() + incby, ct_dynticks());
	instrumentation_end();
	WRITE_ONCE(ct->dynticks_nmi_nesting, /* Prevent store tearing. */
		   ct_dynticks_nmi_nesting() + incby);
	barrier();
}

/**
 * ct_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * If you add or remove a call to ct_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr ct_idle_enter(void)
{
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
	ct_kernel_exit(false, RCU_DYNTICKS_IDX + CONTEXT_IDLE);
}
EXPORT_SYMBOL_GPL(ct_idle_enter);

/**
 * ct_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * If you add or remove a call to ct_idle_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr ct_idle_exit(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	ct_kernel_enter(false, RCU_DYNTICKS_IDX - CONTEXT_IDLE);
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ct_idle_exit);
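/*
 * Editor's sketch (not part of the original file): an idle loop is expected
 * to bracket the architecture's low-power wait with ct_idle_enter() and
 * ct_idle_exit() while interrupts are disabled, roughly as the generic idle
 * loop in kernel/sched/idle.c does.  sketch_do_idle() is a hypothetical name:
 */
#if 0	/* illustrative only */
static void sketch_do_idle(void)
{
	raw_local_irq_disable();
	ct_idle_enter();	/* RCU stops watching this CPU.      */
	arch_cpu_idle();	/* Arch hook: wait for an interrupt. */
	ct_idle_exit();		/* RCU is watching again.            */
	raw_local_irq_enable();
}
#endif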
/**
 * ct_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to user mode!
 * This code assumes that the idle loop never does upcalls to user mode.
 * If your architecture's idle loop does do upcalls to user mode (or does
 * anything else that results in unbalanced calls to the irq_enter() and
 * irq_exit() functions), RCU will give you what you deserve, good and hard.
 * But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to ct_irq_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void ct_irq_enter(void)
{
	lockdep_assert_irqs_disabled();
	ct_nmi_enter();
}

/**
 * ct_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture's idle loop violates this assumption, RCU will give you what
 * you deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to ct_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void ct_irq_exit(void)
{
	lockdep_assert_irqs_disabled();
	ct_nmi_exit();
}

/*
 * Wrapper for ct_irq_enter() where interrupts are enabled.
 *
 * If you add or remove a call to ct_irq_enter_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void ct_irq_enter_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	ct_irq_enter();
	local_irq_restore(flags);
}
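/*
 * Editor's sketch (not part of the original file): an architecture that does
 * not use the generic entry code would wrap its interrupt handling in a
 * strictly balanced ct_irq_enter()/ct_irq_exit() pair, as the comments above
 * demand.  sketch_handle_irq() and sketch_arch_do_irq() are hypothetical:
 */
#if 0	/* illustrative only */
static void sketch_handle_irq(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();
	ct_irq_enter();		/* Possibly exits an idle/user EQS. */
	sketch_arch_do_irq(regs);
	ct_irq_exit();		/* Possibly re-enters the idle EQS. */
}
#endif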
/*
 * Wrapper for ct_irq_exit() where interrupts are enabled.
 *
 * If you add or remove a call to ct_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void ct_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	ct_irq_exit();
	local_irq_restore(flags);
}
#else
static __always_inline void ct_kernel_exit(bool user, int offset) { }
static __always_inline void ct_kernel_enter(bool user, int offset) { }
#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */

#ifdef CONFIG_CONTEXT_TRACKING_USER

#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>

DEFINE_STATIC_KEY_FALSE_RO(context_tracking_key);
EXPORT_SYMBOL_GPL(context_tracking_key);

static noinstr bool context_tracking_recursion_enter(void)
{
	int recursion;

	recursion = __this_cpu_inc_return(context_tracking.recursion);
	if (recursion == 1)
		return true;

	WARN_ONCE((recursion < 1), "Invalid context tracking recursion value %d\n", recursion);
	__this_cpu_dec(context_tracking.recursion);

	return false;
}

static __always_inline void context_tracking_recursion_exit(void)
{
	__this_cpu_dec(context_tracking.recursion);
}

/**
 * __ct_user_enter - Inform the context tracking that the CPU is going
 *		     to enter user or guest space mode.
 *
 * @state: userspace context-tracking state to enter.
 *
 * This function must be called right before we switch from the kernel
 * to user or guest space, when it's guaranteed the remaining kernel
 * instructions to execute won't use any RCU read side critical section
 * because this function sets RCU in extended quiescent state.
 */
void noinstr __ct_user_enter(enum ctx_state state)
{
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);
	lockdep_assert_irqs_disabled();

	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	if (!context_tracking_recursion_enter())
		return;

	if (__ct_state() != state) {
		if (ct->active) {
			/*
			 * At this stage, only low level arch entry code remains and
			 * then we'll run in userspace. We can assume there won't be
			 * any RCU read-side critical section until the next call to
			 * user_exit() or ct_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
			if (state == CONTEXT_USER) {
				instrumentation_begin();
				trace_user_enter(0);
				vtime_user_enter(current);
				instrumentation_end();
			}
			/*
			 * Other than generic entry implementation, we may be past the last
			 * rescheduling opportunity in the entry code. Trigger a self IPI
			 * that will fire and reschedule once we resume in user/guest mode.
			 */
			rcu_irq_work_resched();

			/*
			 * Enter RCU idle mode right before resuming userspace.  No use of RCU
			 * is permitted between this call and rcu_eqs_exit(). This way the
			 * CPU doesn't need to maintain the tick for RCU maintenance purposes
			 * when the CPU runs in userspace.
			 */
			ct_kernel_exit(true, RCU_DYNTICKS_IDX + state);

			/*
			 * Special case if we only track user <-> kernel transitions for tickless
			 * cputime accounting but we don't support RCU extended quiescent state.
			 * In this case we don't care about any concurrency/ordering.
			 */
			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
				raw_atomic_set(&ct->state, state);
		} else {
			/*
			 * Even if context tracking is disabled on this CPU, because it's outside
			 * the full dynticks mask for example, we still have to keep track of the
			 * context transitions and states to prevent inconsistency on those of
			 * other CPUs.
			 * If a task triggers an exception in userspace, sleep on the exception
			 * handler and then migrate to another CPU, that new CPU must know where
			 * the exception returns by the time we call exception_exit().
			 * This information can only be provided by the previous CPU when it called
			 * exception_enter().
			 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
			 * is false because we know that CPU is not tickless.
			 */
			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
				/* Tracking for vtime only, no concurrent RCU EQS accounting */
				raw_atomic_set(&ct->state, state);
			} else {
				/*
				 * Tracking for vtime and RCU EQS. Make sure we don't race
				 * with NMIs. OTOH we don't care about ordering here since
				 * RCU only requires RCU_DYNTICKS_IDX increments to be fully
				 * ordered.
				 */
				raw_atomic_add(state, &ct->state);
			}
		}
	}
	context_tracking_recursion_exit();
}
EXPORT_SYMBOL_GPL(__ct_user_enter);
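/*
 * Editor's sketch (not part of the original file): arch entry code normally
 * reaches __ct_user_enter() through user_enter_irqoff() (see
 * include/linux/context_tracking.h) as its very last step before returning
 * to userspace.  sketch_prepare_exit_to_user() is a hypothetical name:
 */
#if 0	/* illustrative only */
static void sketch_prepare_exit_to_user(void)
{
	local_irq_disable();
	/* ... handle remaining work: signals, need_resched, etc ... */
	user_enter_irqoff();	/* Wraps __ct_user_enter(CONTEXT_USER). */
	/* From here until the return to userspace: no RCU read-side use. */
}
#endif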
/*
 * OBSOLETE:
 * This function should be noinstr but the below local_irq_restore() is
 * unsafe because it involves illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call __context_tracking_enter() through user_enter_irqoff()
 * or context_tracking_guest_enter(). It should be the arch entry code
 * responsibility to call into context tracking with IRQs disabled.
 */
void ct_user_enter(enum ctx_state state)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * ct_irq_enter() rcu_eqs_exit(true) rcu_eqs_enter(true) ct_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	local_irq_save(flags);
	__ct_user_enter(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(ct_user_enter);
EXPORT_SYMBOL_GPL(ct_user_enter);

/**
 * user_enter_callable() - Unfortunate ASM callable version of user_enter() for
 *			   archs that didn't manage to check the context tracking
 *			   static key from low level code.
 *
 * This OBSOLETE function should be noinstr but it unsafely calls
 * local_irq_restore(), involving illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call user_enter_irqoff(). It should be the arch entry code
 * responsibility to call into context tracking with IRQs disabled.
 */
void user_enter_callable(void)
{
	user_enter();
}
NOKPROBE_SYMBOL(user_enter_callable);

/**
 * __ct_user_exit - Inform the context tracking that the CPU is
 *		    exiting user or guest mode and entering the kernel.
 *
 * @state: userspace context-tracking state being exited from.
 *
 * This function must be called after we entered the kernel from user or
 * guest space before any use of RCU read side critical section. This
 * potentially includes any high level kernel code like syscalls, exceptions,
 * signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void noinstr __ct_user_exit(enum ctx_state state)
{
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);

	if (!context_tracking_recursion_enter())
		return;

	if (__ct_state() == state) {
		if (ct->active) {
			/*
			 * Exit RCU idle mode while entering the kernel because it can
			 * run a RCU read side critical section anytime.
			 */
			ct_kernel_enter(true, RCU_DYNTICKS_IDX - state);
			if (state == CONTEXT_USER) {
				instrumentation_begin();
				vtime_user_exit(current);
				trace_user_exit(0);
				instrumentation_end();
			}

			/*
			 * Special case if we only track user <-> kernel transitions for tickless
			 * cputime accounting but we don't support RCU extended quiescent state.
			 * In this case we don't care about any concurrency/ordering.
			 */
			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
				raw_atomic_set(&ct->state, CONTEXT_KERNEL);

		} else {
			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
				/* Tracking for vtime only, no concurrent RCU EQS accounting */
				raw_atomic_set(&ct->state, CONTEXT_KERNEL);
			} else {
				/*
				 * Tracking for vtime and RCU EQS. Make sure we don't race
				 * with NMIs. OTOH we don't care about ordering here since
				 * RCU only requires RCU_DYNTICKS_IDX increments to be fully
				 * ordered.
				 */
				raw_atomic_sub(state, &ct->state);
			}
		}
	}
	context_tracking_recursion_exit();
}
EXPORT_SYMBOL_GPL(__ct_user_exit);

/*
 * OBSOLETE:
 * This function should be noinstr but the below local_irq_save() is
 * unsafe because it involves illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call __context_tracking_exit() through user_exit_irqoff()
 * or context_tracking_guest_exit(). It should be the arch entry code
 * responsibility to call into context tracking with IRQs disabled.
 */
void ct_user_exit(enum ctx_state state)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	__ct_user_exit(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(ct_user_exit);
EXPORT_SYMBOL_GPL(ct_user_exit);
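/*
 * Editor's sketch (not part of the original file): the guest-mode variants
 * mentioned above follow the same pattern.  A hypervisor such as KVM calls
 * context_tracking_guest_enter() with IRQs disabled right before entering
 * the guest and context_tracking_guest_exit() right after VM-exit, so guest
 * execution is an RCU EQS just like userspace.  sketch_vcpu_run() is a
 * hypothetical name:
 */
#if 0	/* illustrative only */
static void sketch_vcpu_run(void)
{
	local_irq_disable();
	context_tracking_guest_enter();	/* -> __ct_user_enter(CONTEXT_GUEST) */
	/* ... hardware guest entry and exit ... */
	context_tracking_guest_exit();	/* -> __ct_user_exit(CONTEXT_GUEST)  */
	local_irq_enable();
}
#endif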
/**
 * user_exit_callable() - Unfortunate ASM callable version of user_exit() for
 *			   archs that didn't manage to check the context tracking
 *			   static key from low level code.
 *
 * This OBSOLETE function should be noinstr but it unsafely calls local_irq_save(),
 * involving illegal RCU uses through tracing and lockdep. This is unlikely
 * to be fixed as this function is obsolete. The preferred way is to call
 * user_exit_irqoff(). It should be the arch entry code responsibility to
 * call into context tracking with IRQs disabled.
 */
void user_exit_callable(void)
{
	user_exit();
}
NOKPROBE_SYMBOL(user_exit_callable);

void __init ct_cpu_track_user(int cpu)
{
	static __initdata bool initialized = false;

	if (!per_cpu(context_tracking.active, cpu)) {
		per_cpu(context_tracking.active, cpu) = true;
		static_branch_inc(&context_tracking_key);
	}

	if (initialized)
		return;

#ifdef CONFIG_HAVE_TIF_NOHZ
	/*
	 * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork
	 * This assumes that init is the only task at this early boot stage.
	 */
	set_tsk_thread_flag(&init_task, TIF_NOHZ);
#endif
	WARN_ON_ONCE(!tasklist_empty());

	initialized = true;
}

#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
void __init context_tracking_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		ct_cpu_track_user(cpu);
}
#endif

#endif /* #ifdef CONFIG_CONTEXT_TRACKING_USER */
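/*
 * Editor's sketch (not part of the original file): outside of
 * CONFIG_CONTEXT_TRACKING_USER_FORCE, ct_cpu_track_user() is typically
 * invoked at boot only for the nohz_full subset of CPUs (from the tick/nohz
 * setup code) rather than for every possible CPU, roughly:
 */
#if 0	/* illustrative only */
void __init sketch_nohz_full_init(const struct cpumask *nohz_full_mask)
{
	int cpu;

	for_each_cpu(cpu, nohz_full_mask)
		ct_cpu_track_user(cpu);
}
#endif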