// SPDX-License-Identifier: GPL-2.0+
//
// Scalability test comparing RCU vs other mechanisms
// for acquiring references on objects.
//
// Copyright (C) Google, 2020.
//
// Author: Joel Fernandes <joel@joelfernandes.org>

#define pr_fmt(fmt) fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#include "rcu.h"

#define SCALE_FLAG "-ref-scale: "

#define SCALEOUT(s, x...) \
	pr_alert("%s" SCALE_FLAG s, scale_type, ## x)

#define VERBOSE_SCALEOUT(s, x...) \
	do { \
		if (verbose) \
			pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x); \
	} while (0)

static atomic_t verbose_batch_ctr;

#define VERBOSE_SCALEOUT_BATCH(s, x...) \
do { \
	if (verbose && \
	    (verbose_batched <= 0 || \
	     !(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) { \
		schedule_timeout_uninterruptible(1); \
		pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x); \
	} \
} while (0)

#define SCALEOUT_ERRSTRING(s, x...) pr_alert("%s" SCALE_FLAG "!!! " s "\n", scale_type, ## x)

MODULE_DESCRIPTION("Scalability test for object reference mechanisms");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joel Fernandes (Google) <joel@joelfernandes.org>");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of test (rcu, srcu, refcnt, rwsem, rwlock).");

torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
torture_param(int, verbose_batched, 0, "Batch verbose debugging printk()s");

// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0,
	      "Holdoff time before test start (s)");
// Number of typesafe_lookup structures, that is, the degree of concurrency.
torture_param(long, lookup_instances, 0, "Number of typesafe_lookup structures.");
// Number of loops per experiment, all readers execute operations concurrently.
torture_param(long, loops, 10000, "Number of loops per experiment.");
// Number of readers, with -1 defaulting to about 75% of the CPUs.
torture_param(int, nreaders, -1, "Number of readers, -1 for 75% of CPUs.");
// Number of runs.
torture_param(int, nruns, 30, "Number of experiments to run.");
// Reader delay in nanoseconds, 0 for no delay.
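// Values of 1000 or more are split by rcu_scale_one_reader() into a
// microsecond part (readdelay / 1000) and a nanosecond part (readdelay % 1000).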
torture_param(int, readdelay, 0, "Read-side delay in nanoseconds.");

#ifdef MODULE
# define REFSCALE_SHUTDOWN 0
#else
# define REFSCALE_SHUTDOWN 1
#endif

torture_param(bool, shutdown, REFSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");

struct reader_task {
	struct task_struct *task;
	int start_reader;
	wait_queue_head_t wq;
	u64 last_duration_ns;
};

static struct task_struct *shutdown_task;
static wait_queue_head_t shutdown_wq;

static struct task_struct *main_task;
static wait_queue_head_t main_wq;
static int shutdown_start;

static struct reader_task *reader_tasks;

// Number of readers that are part of the current experiment.
static atomic_t nreaders_exp;

// Used to wait for all threads to start.
static atomic_t n_init;
static atomic_t n_started;
static atomic_t n_warmedup;
static atomic_t n_cooleddown;

// Track which experiment is currently running.
static int exp_idx;

// Operations vector for selecting different types of tests.
struct ref_scale_ops {
	bool (*init)(void);
	void (*cleanup)(void);
	void (*readsection)(const int nloops);
	void (*delaysection)(const int nloops, const int udl, const int ndl);
	const char *name;
};

static struct ref_scale_ops *cur_ops;

static void un_delay(const int udl, const int ndl)
{
	if (udl)
		udelay(udl);
	if (ndl)
		ndelay(ndl);
}

static void ref_rcu_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		rcu_read_unlock();
	}
}

static void ref_rcu_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		un_delay(udl, ndl);
		rcu_read_unlock();
	}
}

static bool rcu_sync_scale_init(void)
{
	return true;
}

static struct ref_scale_ops rcu_ops = {
	.init = rcu_sync_scale_init,
	.readsection = ref_rcu_read_section,
	.delaysection = ref_rcu_delay_section,
	.name = "rcu"
};

// Definitions for SRCU ref scale testing.
DEFINE_STATIC_SRCU(srcu_refctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_refctl_scale;

static void srcu_ref_scale_read_section(const int nloops)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}

static void srcu_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		un_delay(udl, ndl);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}

static struct ref_scale_ops srcu_ops = {
	.init = rcu_sync_scale_init,
	.readsection = srcu_ref_scale_read_section,
	.delaysection = srcu_ref_scale_delay_section,
	.name = "srcu"
};

#ifdef CONFIG_TASKS_RCU

// Definitions for RCU Tasks ref scale testing: Empty read markers.
// These definitions also work for RCU Rude readers.
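// Tasks RCU (and RCU Rude) readers have no explicit read-side markers,
// so the "read-side critical section" below is just an empty loop body,
// measuring nothing but per-iteration loop overhead.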
static void rcu_tasks_ref_scale_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--)
		continue;
}

static void rcu_tasks_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--)
		un_delay(udl, ndl);
}

static struct ref_scale_ops rcu_tasks_ops = {
	.init = rcu_sync_scale_init,
	.readsection = rcu_tasks_ref_scale_read_section,
	.delaysection = rcu_tasks_ref_scale_delay_section,
	.name = "rcu-tasks"
};

#define RCU_TASKS_OPS &rcu_tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define RCU_TASKS_OPS

#endif // #else // #ifdef CONFIG_TASKS_RCU

#ifdef CONFIG_TASKS_TRACE_RCU

// Definitions for RCU Tasks Trace ref scale testing.
static void rcu_trace_ref_scale_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		rcu_read_unlock_trace();
	}
}

static void rcu_trace_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		un_delay(udl, ndl);
		rcu_read_unlock_trace();
	}
}

static struct ref_scale_ops rcu_trace_ops = {
	.init = rcu_sync_scale_init,
	.readsection = rcu_trace_ref_scale_read_section,
	.delaysection = rcu_trace_ref_scale_delay_section,
	.name = "rcu-trace"
};

#define RCU_TRACE_OPS &rcu_trace_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define RCU_TRACE_OPS

#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU

// Definitions for reference count
static atomic_t refcnt;

static void ref_refcnt_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		atomic_dec(&refcnt);
	}
}

static void ref_refcnt_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		un_delay(udl, ndl);
		atomic_dec(&refcnt);
	}
}

static struct ref_scale_ops refcnt_ops = {
	.init = rcu_sync_scale_init,
	.readsection = ref_refcnt_section,
	.delaysection = ref_refcnt_delay_section,
	.name = "refcnt"
};

// Definitions for rwlock
static rwlock_t test_rwlock;

static bool ref_rwlock_init(void)
{
	rwlock_init(&test_rwlock);
	return true;
}

static void ref_rwlock_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		read_unlock(&test_rwlock);
	}
}

static void ref_rwlock_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		un_delay(udl, ndl);
		read_unlock(&test_rwlock);
	}
}

static struct ref_scale_ops rwlock_ops = {
	.init = ref_rwlock_init,
	.readsection = ref_rwlock_section,
	.delaysection = ref_rwlock_delay_section,
	.name = "rwlock"
};

// Definitions for rwsem
static struct rw_semaphore test_rwsem;

static bool ref_rwsem_init(void)
{
	init_rwsem(&test_rwsem);
	return true;
}

static void ref_rwsem_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		up_read(&test_rwsem);
	}
}
static void ref_rwsem_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		un_delay(udl, ndl);
		up_read(&test_rwsem);
	}
}

static struct ref_scale_ops rwsem_ops = {
	.init = ref_rwsem_init,
	.readsection = ref_rwsem_section,
	.delaysection = ref_rwsem_delay_section,
	.name = "rwsem"
};

// Definitions for global spinlock
static DEFINE_RAW_SPINLOCK(test_lock);

static void ref_lock_section(const int nloops)
{
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		raw_spin_lock(&test_lock);
		raw_spin_unlock(&test_lock);
	}
	preempt_enable();
}

static void ref_lock_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		raw_spin_lock(&test_lock);
		un_delay(udl, ndl);
		raw_spin_unlock(&test_lock);
	}
	preempt_enable();
}

static struct ref_scale_ops lock_ops = {
	.readsection = ref_lock_section,
	.delaysection = ref_lock_delay_section,
	.name = "lock"
};

// Definitions for global irq-save spinlock

static void ref_lock_irq_section(const int nloops)
{
	unsigned long flags;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		raw_spin_lock_irqsave(&test_lock, flags);
		raw_spin_unlock_irqrestore(&test_lock, flags);
	}
	preempt_enable();
}

static void ref_lock_irq_delay_section(const int nloops, const int udl, const int ndl)
{
	unsigned long flags;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		raw_spin_lock_irqsave(&test_lock, flags);
		un_delay(udl, ndl);
		raw_spin_unlock_irqrestore(&test_lock, flags);
	}
	preempt_enable();
}

static struct ref_scale_ops lock_irq_ops = {
	.readsection = ref_lock_irq_section,
	.delaysection = ref_lock_irq_delay_section,
	.name = "lock-irq"
};

// Definitions for acquire-release.
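// Each reader bumps its own CPU's counter with an smp_load_acquire()/
// smp_store_release() pair, so this variant isolates the cost of
// acquire/release ordering without any shared-variable contention.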
static DEFINE_PER_CPU(unsigned long, test_acqrel);

static void ref_acqrel_section(const int nloops)
{
	unsigned long x;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
		smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
	}
	preempt_enable();
}

static void ref_acqrel_delay_section(const int nloops, const int udl, const int ndl)
{
	unsigned long x;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
		un_delay(udl, ndl);
		smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
	}
	preempt_enable();
}

static struct ref_scale_ops acqrel_ops = {
	.readsection = ref_acqrel_section,
	.delaysection = ref_acqrel_delay_section,
	.name = "acqrel"
};

static volatile u64 stopopts;

static void ref_clock_section(const int nloops)
{
	u64 x = 0;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--)
		x += ktime_get_real_fast_ns();
	preempt_enable();
	stopopts = x;
}

static void ref_clock_delay_section(const int nloops, const int udl, const int ndl)
{
	u64 x = 0;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x += ktime_get_real_fast_ns();
		un_delay(udl, ndl);
	}
	preempt_enable();
	stopopts = x;
}

static struct ref_scale_ops clock_ops = {
	.readsection = ref_clock_section,
	.delaysection = ref_clock_delay_section,
	.name = "clock"
};

static void ref_jiffies_section(const int nloops)
{
	u64 x = 0;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--)
		x += jiffies;
	preempt_enable();
	stopopts = x;
}

static void ref_jiffies_delay_section(const int nloops, const int udl, const int ndl)
{
	u64 x = 0;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x += jiffies;
		un_delay(udl, ndl);
	}
	preempt_enable();
	stopopts = x;
}

static struct ref_scale_ops jiffies_ops = {
	.readsection = ref_jiffies_section,
	.delaysection = ref_jiffies_delay_section,
	.name = "jiffies"
};

////////////////////////////////////////////////////////////////////////
//
// Methods leveraging SLAB_TYPESAFE_BY_RCU.
//

// Item to look up in a typesafe manner. Array of pointers to these.
struct refscale_typesafe {
	atomic_t rts_refctr;  // Used by all flavors
	spinlock_t rts_lock;
	seqlock_t rts_seqlock;
	unsigned int a;
	unsigned int b;
};

static struct kmem_cache *typesafe_kmem_cachep;
static struct refscale_typesafe **rtsarray;
static long rtsarray_size;
static DEFINE_TORTURE_RANDOM_PERCPU(refscale_rand);
static bool (*rts_acquire)(struct refscale_typesafe *rtsp, unsigned int *start);
static bool (*rts_release)(struct refscale_typesafe *rtsp, unsigned int start);

// Conditionally acquire an explicit in-structure reference count.
static bool typesafe_ref_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{
	return atomic_inc_not_zero(&rtsp->rts_refctr);
}

// Unconditionally release an explicit in-structure reference count.
static bool typesafe_ref_release(struct refscale_typesafe *rtsp, unsigned int start)
{
	if (!atomic_dec_return(&rtsp->rts_refctr)) {
		WRITE_ONCE(rtsp->a, rtsp->a + 1);
		kmem_cache_free(typesafe_kmem_cachep, rtsp);
	}
	return true;
}

// Unconditionally acquire an explicit in-structure spinlock.
static bool typesafe_lock_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{
	spin_lock(&rtsp->rts_lock);
	return true;
}

// Unconditionally release an explicit in-structure spinlock.
static bool typesafe_lock_release(struct refscale_typesafe *rtsp, unsigned int start)
{
	spin_unlock(&rtsp->rts_lock);
	return true;
}

// Unconditionally acquire an explicit in-structure sequence lock.
static bool typesafe_seqlock_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{
	*start = read_seqbegin(&rtsp->rts_seqlock);
	return true;
}

// Conditionally release an explicit in-structure sequence lock.  Return
// true if this release was successful, that is, if no retry is required.
static bool typesafe_seqlock_release(struct refscale_typesafe *rtsp, unsigned int start)
{
	return !read_seqretry(&rtsp->rts_seqlock, start);
}

// Do a read-side critical section with the specified delay in
// microseconds and nanoseconds inserted so as to increase probability
// of failure.
static void typesafe_delay_section(const int nloops, const int udl, const int ndl)
{
	unsigned int a;
	unsigned int b;
	int i;
	long idx;
	struct refscale_typesafe *rtsp;
	unsigned int start;

	for (i = nloops; i >= 0; i--) {
		preempt_disable();
		idx = torture_random(this_cpu_ptr(&refscale_rand)) % rtsarray_size;
		preempt_enable();
retry:
		rcu_read_lock();
		rtsp = rcu_dereference(rtsarray[idx]);
		a = READ_ONCE(rtsp->a);
		if (!rts_acquire(rtsp, &start)) {
			rcu_read_unlock();
			goto retry;
		}
		if (a != READ_ONCE(rtsp->a)) {
			(void)rts_release(rtsp, start);
			rcu_read_unlock();
			goto retry;
		}
		un_delay(udl, ndl);
		b = READ_ONCE(rtsp->a);
		// Remember, seqlock read-side release can fail.
		if (!rts_release(rtsp, start)) {
			rcu_read_unlock();
			goto retry;
		}
		WARN_ONCE(a != b, "Re-read of ->a changed from %u to %u.\n", a, b);
		b = rtsp->b;
		rcu_read_unlock();
		WARN_ON_ONCE(a * a != b);
	}
}

// Because the acquisition and release methods are expensive, there
// is no point in optimizing away the un_delay() function's two checks.
// Thus simply define typesafe_read_section() as a simple wrapper around
// typesafe_delay_section().
static void typesafe_read_section(const int nloops)
{
	typesafe_delay_section(nloops, 0, 0);
}

// Allocate and initialize one refscale_typesafe structure.
static struct refscale_typesafe *typesafe_alloc_one(void)
{
	struct refscale_typesafe *rtsp;

	rtsp = kmem_cache_alloc(typesafe_kmem_cachep, GFP_KERNEL);
	if (!rtsp)
		return NULL;
	atomic_set(&rtsp->rts_refctr, 1);
	WRITE_ONCE(rtsp->a, rtsp->a + 1);
	WRITE_ONCE(rtsp->b, rtsp->a * rtsp->a);
	return rtsp;
}

// Slab-allocator constructor for refscale_typesafe structures created
// out of a new slab of system memory.
static void refscale_typesafe_ctor(void *rtsp_in)
{
	struct refscale_typesafe *rtsp = rtsp_in;

	spin_lock_init(&rtsp->rts_lock);
	seqlock_init(&rtsp->rts_seqlock);
	preempt_disable();
	rtsp->a = torture_random(this_cpu_ptr(&refscale_rand));
	preempt_enable();
}

static struct ref_scale_ops typesafe_ref_ops;
static struct ref_scale_ops typesafe_lock_ops;
static struct ref_scale_ops typesafe_seqlock_ops;

// Initialize for a typesafe test.
static bool typesafe_init(void)
{
	long idx;
	long si = lookup_instances;

	typesafe_kmem_cachep = kmem_cache_create("refscale_typesafe",
						 sizeof(struct refscale_typesafe), sizeof(void *),
						 SLAB_TYPESAFE_BY_RCU, refscale_typesafe_ctor);
	if (!typesafe_kmem_cachep)
		return false;
	if (si < 0)
		si = -si * nr_cpu_ids;
	else if (si == 0)
		si = nr_cpu_ids;
	rtsarray_size = si;
	rtsarray = kcalloc(si, sizeof(*rtsarray), GFP_KERNEL);
	if (!rtsarray)
		return false;
	for (idx = 0; idx < rtsarray_size; idx++) {
		rtsarray[idx] = typesafe_alloc_one();
		if (!rtsarray[idx])
			return false;
	}
	if (cur_ops == &typesafe_ref_ops) {
		rts_acquire = typesafe_ref_acquire;
		rts_release = typesafe_ref_release;
	} else if (cur_ops == &typesafe_lock_ops) {
		rts_acquire = typesafe_lock_acquire;
		rts_release = typesafe_lock_release;
	} else if (cur_ops == &typesafe_seqlock_ops) {
		rts_acquire = typesafe_seqlock_acquire;
		rts_release = typesafe_seqlock_release;
	} else {
		WARN_ON_ONCE(1);
		return false;
	}
	return true;
}

// Clean up after a typesafe test.
static void typesafe_cleanup(void)
{
	long idx;

	if (rtsarray) {
		for (idx = 0; idx < rtsarray_size; idx++)
			kmem_cache_free(typesafe_kmem_cachep, rtsarray[idx]);
		kfree(rtsarray);
		rtsarray = NULL;
		rtsarray_size = 0;
	}
	kmem_cache_destroy(typesafe_kmem_cachep);
	typesafe_kmem_cachep = NULL;
	rts_acquire = NULL;
	rts_release = NULL;
}

// The typesafe_init() function distinguishes these structures by address.
static struct ref_scale_ops typesafe_ref_ops = {
	.init = typesafe_init,
	.cleanup = typesafe_cleanup,
	.readsection = typesafe_read_section,
	.delaysection = typesafe_delay_section,
	.name = "typesafe_ref"
};

static struct ref_scale_ops typesafe_lock_ops = {
	.init = typesafe_init,
	.cleanup = typesafe_cleanup,
	.readsection = typesafe_read_section,
	.delaysection = typesafe_delay_section,
	.name = "typesafe_lock"
};

static struct ref_scale_ops typesafe_seqlock_ops = {
	.init = typesafe_init,
	.cleanup = typesafe_cleanup,
	.readsection = typesafe_read_section,
	.delaysection = typesafe_delay_section,
	.name = "typesafe_seqlock"
};

static void rcu_scale_one_reader(void)
{
	if (readdelay <= 0)
		cur_ops->readsection(loops);
	else
		cur_ops->delaysection(loops, readdelay / 1000, readdelay % 1000);
}

// Reader kthread.  Repeatedly does empty RCU read-side critical
// sections, minimizing update-side interference.
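// Each reader waits for main_func() to signal the start of an experiment,
// runs cache-warming calls to rcu_scale_one_reader() until all readers have
// checked in, times one full invocation with interrupts disabled, and then
// keeps generating load until every reader has recorded its duration.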
static int
ref_scale_reader(void *arg)
{
	unsigned long flags;
	long me = (long)arg;
	struct reader_task *rt = &(reader_tasks[me]);
	u64 start;
	s64 duration;

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me);
	WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_init);
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);
repeat:
	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, raw_smp_processor_id());

	// Wait for signal that this reader can start.
	wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
		   torture_must_stop());

	if (torture_must_stop())
		goto end;

	// Make sure that the CPU is affinitized appropriately during testing.
	WARN_ON_ONCE(raw_smp_processor_id() != me);

	WRITE_ONCE(rt->start_reader, 0);
	if (!atomic_dec_return(&n_started))
		while (atomic_read_acquire(&n_started))
			cpu_relax();

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d started", me, exp_idx);

	// To reduce noise, do an initial cache-warming invocation, check
	// in, and then keep warming until everyone has checked in.
	rcu_scale_one_reader();
	if (!atomic_dec_return(&n_warmedup))
		while (atomic_read_acquire(&n_warmedup))
			rcu_scale_one_reader();
	// Also keep interrupts disabled.  This also has the effect
	// of preventing entries into slow path for rcu_read_unlock().
	local_irq_save(flags);
	start = ktime_get_mono_fast_ns();

	rcu_scale_one_reader();

	duration = ktime_get_mono_fast_ns() - start;
	local_irq_restore(flags);

	rt->last_duration_ns = WARN_ON_ONCE(duration < 0) ? 0 : duration;
	// To reduce runtime-skew noise, do maintain-load invocations until
	// everyone is done.
	if (!atomic_dec_return(&n_cooleddown))
		while (atomic_read_acquire(&n_cooleddown))
			rcu_scale_one_reader();

	if (atomic_dec_and_test(&nreaders_exp))
		wake_up(&main_wq);

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d ended, (readers remaining=%d)",
			       me, exp_idx, atomic_read(&nreaders_exp));

	if (!torture_must_stop())
		goto repeat;
end:
	torture_kthread_stopping("ref_scale_reader");
	return 0;
}

static void reset_readers(void)
{
	int i;
	struct reader_task *rt;

	for (i = 0; i < nreaders; i++) {
		rt = &(reader_tasks[i]);

		rt->last_duration_ns = 0;
	}
}

// Print the results of each reader and return the sum of all their durations.
static u64 process_durations(int n)
{
	int i;
	struct reader_task *rt;
	char buf1[64];
	char *buf;
	u64 sum = 0;

	buf = kmalloc(800 + 64, GFP_KERNEL);
	if (!buf)
		return 0;
	buf[0] = 0;
	sprintf(buf, "Experiment #%d (Format: <THREAD-NUM>:<Total loop time in ns>)",
		exp_idx);

	for (i = 0; i < n && !torture_must_stop(); i++) {
		rt = &(reader_tasks[i]);
		sprintf(buf1, "%d: %llu\t", i, rt->last_duration_ns);

		if (i % 5 == 0)
			strcat(buf, "\n");
		if (strlen(buf) >= 800) {
			pr_alert("%s", buf);
			buf[0] = 0;
		}
		strcat(buf, buf1);

		sum += rt->last_duration_ns;
	}
	pr_alert("%s\n", buf);

	kfree(buf);
	return sum;
}

// The main_func is the main orchestrator; it performs a bunch of
// experiments.  For every experiment, it orders all the readers
// involved to start and waits for them to finish the experiment.  It
// then reads their timestamps and starts the next experiment.  Each
// experiment progresses from 1 concurrent reader to N of them, at which
// point all the timestamps are printed.
static int main_func(void *arg)
{
	int exp, r;
	char buf1[64];
	char *buf;
	u64 *result_avg;

	set_cpus_allowed_ptr(current, cpumask_of(nreaders % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	VERBOSE_SCALEOUT("main_func task started");
	result_avg = kzalloc(nruns * sizeof(*result_avg), GFP_KERNEL);
	buf = kzalloc(800 + 64, GFP_KERNEL);
	if (!result_avg || !buf) {
		SCALEOUT_ERRSTRING("out of memory");
		goto oom_exit;
	}
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);

	// Wait for all threads to start.
	atomic_inc(&n_init);
	while (atomic_read(&n_init) < nreaders + 1)
		schedule_timeout_uninterruptible(1);

	// Start exp readers up per experiment
	for (exp = 0; exp < nruns && !torture_must_stop(); exp++) {
		if (torture_must_stop())
			goto end;

		reset_readers();
		atomic_set(&nreaders_exp, nreaders);
		atomic_set(&n_started, nreaders);
		atomic_set(&n_warmedup, nreaders);
		atomic_set(&n_cooleddown, nreaders);

		exp_idx = exp;

		for (r = 0; r < nreaders; r++) {
			smp_store_release(&reader_tasks[r].start_reader, 1);
			wake_up(&reader_tasks[r].wq);
		}

		VERBOSE_SCALEOUT("main_func: experiment started, waiting for %d readers",
				 nreaders);

		wait_event(main_wq,
			   !atomic_read(&nreaders_exp) || torture_must_stop());

		VERBOSE_SCALEOUT("main_func: experiment ended");

		if (torture_must_stop())
			goto end;

		result_avg[exp] = div_u64(1000 * process_durations(nreaders), nreaders * loops);
	}

	// Print the average of all experiments
	SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");

	pr_alert("Runs\tTime(ns)\n");
	for (exp = 0; exp < nruns; exp++) {
		u64 avg;
		u32 rem;

		avg = div_u64_rem(result_avg[exp], 1000, &rem);
		sprintf(buf1, "%d\t%llu.%03u\n", exp + 1, avg, rem);
		strcat(buf, buf1);
		if (strlen(buf) >= 800) {
			pr_alert("%s", buf);
			buf[0] = 0;
		}
	}

	pr_alert("%s", buf);

oom_exit:
	// This will shut down everything including us.
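	// The shutdown kthread wakes up, runs ref_scale_cleanup(), and then
	// powers off the system.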
	if (shutdown) {
		shutdown_start = 1;
		wake_up(&shutdown_wq);
	}

	// Wait for torture to stop us
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);

end:
	torture_kthread_stopping("main_func");
	kfree(result_avg);
	kfree(buf);
	return 0;
}

static void
ref_scale_print_module_parms(struct ref_scale_ops *cur_ops, const char *tag)
{
	pr_alert("%s" SCALE_FLAG
		 "--- %s: verbose=%d verbose_batched=%d shutdown=%d holdoff=%d lookup_instances=%ld loops=%ld nreaders=%d nruns=%d readdelay=%d\n", scale_type, tag,
		 verbose, verbose_batched, shutdown, holdoff, lookup_instances, loops, nreaders, nruns, readdelay);
}

static void
ref_scale_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nreaders; i++)
			torture_stop_kthread("ref_scale_reader",
					     reader_tasks[i].task);
	}
	kfree(reader_tasks);

	torture_stop_kthread("main_task", main_task);
	kfree(main_task);

	// Do scale-type-specific cleanup operations.
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

// Shutdown kthread.  Just waits to be awakened, then shuts down system.
static int
ref_scale_shutdown(void *arg)
{
	wait_event_idle(shutdown_wq, shutdown_start);

	smp_mb(); // Wake before output.
	ref_scale_cleanup();
	kernel_power_off();

	return -EINVAL;
}

static int __init
ref_scale_init(void)
{
	long i;
	int firsterr = 0;
	static struct ref_scale_ops *scale_ops[] = {
		&rcu_ops, &srcu_ops, RCU_TRACE_OPS RCU_TASKS_OPS &refcnt_ops, &rwlock_ops,
		&rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops, &jiffies_ops,
		&typesafe_ref_ops, &typesafe_lock_ops, &typesafe_seqlock_ops,
	};

	if (!torture_init_begin(scale_type, verbose))
		return -EBUSY;

	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
		cur_ops = scale_ops[i];
		if (strcmp(scale_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(scale_ops)) {
		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
		pr_alert("rcu-scale types:");
		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
			pr_cont(" %s", scale_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		if (!cur_ops->init()) {
			firsterr = -EUCLEAN;
			goto unwind;
		}

	ref_scale_print_module_parms(cur_ops, "Start of test");

	// Shutdown task
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(ref_scale_shutdown, NULL,
						  shutdown_task);
		if (torture_init_error(firsterr))
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	// Reader tasks (default to ~75% of online CPUs).
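	// (num_online_cpus() >> 1) + (num_online_cpus() >> 2) is 50% + 25%,
	// that is, about 75% of the online CPUs.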
	if (nreaders < 0)
		nreaders = (num_online_cpus() >> 1) + (num_online_cpus() >> 2);
	if (WARN_ONCE(loops <= 0, "%s: loops = %ld, adjusted to 1\n", __func__, loops))
		loops = 1;
	if (WARN_ONCE(nreaders <= 0, "%s: nreaders = %d, adjusted to 1\n", __func__, nreaders))
		nreaders = 1;
	if (WARN_ONCE(nruns <= 0, "%s: nruns = %d, adjusted to 1\n", __func__, nruns))
		nruns = 1;
	reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (!reader_tasks) {
		SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	VERBOSE_SCALEOUT("Starting %d reader threads", nreaders);

	for (i = 0; i < nreaders; i++) {
		init_waitqueue_head(&reader_tasks[i].wq);
		firsterr = torture_create_kthread(ref_scale_reader, (void *)i,
						  reader_tasks[i].task);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	// Main Task
	init_waitqueue_head(&main_wq);
	firsterr = torture_create_kthread(main_func, NULL, main_task);
	if (torture_init_error(firsterr))
		goto unwind;

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	ref_scale_cleanup();
	if (shutdown) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_REF_SCALE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(ref_scale_init);
module_exit(ref_scale_cleanup);