// SPDX-License-Identifier: GPL-2.0+
//
// Scalability test comparing RCU vs other mechanisms
// for acquiring references on objects.
//
// Copyright (C) Google, 2020.
//
// Author: Joel Fernandes <joel@joelfernandes.org>

#define pr_fmt(fmt) fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_buf.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#include "rcu.h"

#define SCALE_FLAG "-ref-scale: "

#define SCALEOUT(s, x...) \
        pr_alert("%s" SCALE_FLAG s, scale_type, ## x)

#define VERBOSE_SCALEOUT(s, x...) \
        do { \
                if (verbose) \
                        pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x); \
        } while (0)

static atomic_t verbose_batch_ctr;

#define VERBOSE_SCALEOUT_BATCH(s, x...)                                         \
do {                                                                            \
        if (verbose &&                                                          \
            (verbose_batched <= 0 ||                                            \
             !(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) {     \
                schedule_timeout_uninterruptible(1);                            \
                pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x);             \
        }                                                                       \
} while (0)

#define SCALEOUT_ERRSTRING(s, x...) pr_alert("%s" SCALE_FLAG "!!! " s "\n", scale_type, ## x)
" s "\n", scale_type, ## x) 66 65 67 MODULE_DESCRIPTION("Scalability test for objec << 68 MODULE_LICENSE("GPL"); 66 MODULE_LICENSE("GPL"); 69 MODULE_AUTHOR("Joel Fernandes (Google) <joel@j 67 MODULE_AUTHOR("Joel Fernandes (Google) <joel@joelfernandes.org>"); 70 68 71 static char *scale_type = "rcu"; 69 static char *scale_type = "rcu"; 72 module_param(scale_type, charp, 0444); 70 module_param(scale_type, charp, 0444); 73 MODULE_PARM_DESC(scale_type, "Type of test (rc 71 MODULE_PARM_DESC(scale_type, "Type of test (rcu, srcu, refcnt, rwsem, rwlock."); 74 72 75 torture_param(int, verbose, 0, "Enable verbose 73 torture_param(int, verbose, 0, "Enable verbose debugging printk()s"); 76 torture_param(int, verbose_batched, 0, "Batch 74 torture_param(int, verbose_batched, 0, "Batch verbose debugging printk()s"); 77 75 78 // Wait until there are multiple CPUs before s 76 // Wait until there are multiple CPUs before starting test. 79 torture_param(int, holdoff, IS_BUILTIN(CONFIG_ 77 torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0, 80 "Holdoff time before test start 78 "Holdoff time before test start (s)"); 81 // Number of typesafe_lookup structures, that 79 // Number of typesafe_lookup structures, that is, the degree of concurrency. 82 torture_param(long, lookup_instances, 0, "Numb 80 torture_param(long, lookup_instances, 0, "Number of typesafe_lookup structures."); 83 // Number of loops per experiment, all readers 81 // Number of loops per experiment, all readers execute operations concurrently. 84 torture_param(long, loops, 10000, "Number of l 82 torture_param(long, loops, 10000, "Number of loops per experiment."); 85 // Number of readers, with -1 defaulting to ab 83 // Number of readers, with -1 defaulting to about 75% of the CPUs. 86 torture_param(int, nreaders, -1, "Number of re 84 torture_param(int, nreaders, -1, "Number of readers, -1 for 75% of CPUs."); 87 // Number of runs. 85 // Number of runs. 88 torture_param(int, nruns, 30, "Number of exper 86 torture_param(int, nruns, 30, "Number of experiments to run."); 89 // Reader delay in nanoseconds, 0 for no delay 87 // Reader delay in nanoseconds, 0 for no delay. 

#ifdef MODULE
# define REFSCALE_SHUTDOWN 0
#else
# define REFSCALE_SHUTDOWN 1
#endif

torture_param(bool, shutdown, REFSCALE_SHUTDOWN,
              "Shutdown at end of scalability tests.");

struct reader_task {
        struct task_struct *task;
        int start_reader;
        wait_queue_head_t wq;
        u64 last_duration_ns;
};

static struct task_struct *shutdown_task;
static wait_queue_head_t shutdown_wq;

static struct task_struct *main_task;
static wait_queue_head_t main_wq;
static int shutdown_start;

static struct reader_task *reader_tasks;

// Number of readers that are part of the current experiment.
static atomic_t nreaders_exp;

// Used to wait for all threads to start.
static atomic_t n_init;
static atomic_t n_started;
static atomic_t n_warmedup;
static atomic_t n_cooleddown;

// Track which experiment is currently running.
static int exp_idx;

// Operations vector for selecting different types of tests.
struct ref_scale_ops {
        bool (*init)(void);
        void (*cleanup)(void);
        void (*readsection)(const int nloops);
        void (*delaysection)(const int nloops, const int udl, const int ndl);
        const char *name;
};

static const struct ref_scale_ops *cur_ops;
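
// Note that the read-side loops below count from nloops down to and
// including zero, so each invocation executes nloops + 1 critical
// sections.  The un_delay() helper injects the requested microsecond
// plus nanosecond delay into a critical section.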

static void un_delay(const int udl, const int ndl)
{
        if (udl)
                udelay(udl);
        if (ndl)
                ndelay(ndl);
}

static void ref_rcu_read_section(const int nloops)
{
        int i;

        for (i = nloops; i >= 0; i--) {
                rcu_read_lock();
                rcu_read_unlock();
        }
}

static void ref_rcu_delay_section(const int nloops, const int udl, const int ndl)
{
        int i;

        for (i = nloops; i >= 0; i--) {
                rcu_read_lock();
                un_delay(udl, ndl);
                rcu_read_unlock();
        }
}

static bool rcu_sync_scale_init(void)
{
        return true;
}

static const struct ref_scale_ops rcu_ops = {
        .init           = rcu_sync_scale_init,
        .readsection    = ref_rcu_read_section,
        .delaysection   = ref_rcu_delay_section,
        .name           = "rcu"
};

// Definitions for SRCU ref scale testing.
DEFINE_STATIC_SRCU(srcu_refctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_refctl_scale;

static void srcu_ref_scale_read_section(const int nloops)
{
        int i;
        int idx;

        for (i = nloops; i >= 0; i--) {
                idx = srcu_read_lock(srcu_ctlp);
                srcu_read_unlock(srcu_ctlp, idx);
        }
}

static void srcu_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
        int i;
        int idx;

        for (i = nloops; i >= 0; i--) {
                idx = srcu_read_lock(srcu_ctlp);
                un_delay(udl, ndl);
                srcu_read_unlock(srcu_ctlp, idx);
        }
}

static const struct ref_scale_ops srcu_ops = {
        .init           = rcu_sync_scale_init,
        .readsection    = srcu_ref_scale_read_section,
        .delaysection   = srcu_ref_scale_delay_section,
        .name           = "srcu"
};

#ifdef CONFIG_TASKS_RCU

// Definitions for RCU Tasks ref scale testing: Empty read markers.
// These definitions also work for RCU Rude readers.
static void rcu_tasks_ref_scale_read_section(const int nloops)
{
        int i;

        for (i = nloops; i >= 0; i--)
                continue;
}

static void rcu_tasks_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
        int i;

        for (i = nloops; i >= 0; i--)
                un_delay(udl, ndl);
}

static const struct ref_scale_ops rcu_tasks_ops = {
        .init           = rcu_sync_scale_init,
        .readsection    = rcu_tasks_ref_scale_read_section,
        .delaysection   = rcu_tasks_ref_scale_delay_section,
        .name           = "rcu-tasks"
};

#define RCU_TASKS_OPS &rcu_tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define RCU_TASKS_OPS

#endif // #else // #ifdef CONFIG_TASKS_RCU

#ifdef CONFIG_TASKS_TRACE_RCU

// Definitions for RCU Tasks Trace ref scale testing.
static void rcu_trace_ref_scale_read_section(const int nloops)
{
        int i;

        for (i = nloops; i >= 0; i--) {
                rcu_read_lock_trace();
                rcu_read_unlock_trace();
        }
}

static void rcu_trace_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
        int i;

        for (i = nloops; i >= 0; i--) {
                rcu_read_lock_trace();
                un_delay(udl, ndl);
                rcu_read_unlock_trace();
        }
}

static const struct ref_scale_ops rcu_trace_ops = {
        .init           = rcu_sync_scale_init,
        .readsection    = rcu_trace_ref_scale_read_section,
        .delaysection   = rcu_trace_ref_scale_delay_section,
        .name           = "rcu-trace"
};

#define RCU_TRACE_OPS &rcu_trace_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define RCU_TRACE_OPS

#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU

// Definitions for reference count
static atomic_t refcnt;

static void ref_refcnt_section(const int nloops)
{
        int i;

        for (i = nloops; i >= 0; i--) {
                atomic_inc(&refcnt);
                atomic_dec(&refcnt);
        }
}

static void ref_refcnt_delay_section(const int nloops, const int udl, const int ndl)
{
        int i;

        for (i = nloops; i >= 0; i--) {
                atomic_inc(&refcnt);
                un_delay(udl, ndl);
                atomic_dec(&refcnt);
        }
}

static const struct ref_scale_ops refcnt_ops = {
        .init           = rcu_sync_scale_init,
        .readsection    = ref_refcnt_section,
        .delaysection   = ref_refcnt_delay_section,
        .name           = "refcnt"
};
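
// Unlike the RCU flavors above, whose read-side markers touch no shared
// memory, the refcnt flavor above bounces the global refcnt cache line
// among all readers, which is the kind of reference-acquisition overhead
// that this module exists to compare against RCU.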

// Definitions for rwlock
static rwlock_t test_rwlock;

static bool ref_rwlock_init(void)
{
        rwlock_init(&test_rwlock);
        return true;
}

static void ref_rwlock_section(const int nloops)
{
        int i;

        for (i = nloops; i >= 0; i--) {
                read_lock(&test_rwlock);
                read_unlock(&test_rwlock);
        }
}

static void ref_rwlock_delay_section(const int nloops, const int udl, const int ndl)
{
        int i;

        for (i = nloops; i >= 0; i--) {
                read_lock(&test_rwlock);
                un_delay(udl, ndl);
                read_unlock(&test_rwlock);
        }
}

static const struct ref_scale_ops rwlock_ops = {
        .init           = ref_rwlock_init,
        .readsection    = ref_rwlock_section,
        .delaysection   = ref_rwlock_delay_section,
        .name           = "rwlock"
};

// Definitions for rwsem
static struct rw_semaphore test_rwsem;

static bool ref_rwsem_init(void)
{
        init_rwsem(&test_rwsem);
        return true;
}

static void ref_rwsem_section(const int nloops)
{
        int i;

        for (i = nloops; i >= 0; i--) {
                down_read(&test_rwsem);
                up_read(&test_rwsem);
        }
}

static void ref_rwsem_delay_section(const int nloops, const int udl, const int ndl)
{
        int i;

        for (i = nloops; i >= 0; i--) {
                down_read(&test_rwsem);
                un_delay(udl, ndl);
                up_read(&test_rwsem);
        }
}

static const struct ref_scale_ops rwsem_ops = {
        .init           = ref_rwsem_init,
        .readsection    = ref_rwsem_section,
        .delaysection   = ref_rwsem_delay_section,
        .name           = "rwsem"
};

// Definitions for global spinlock
static DEFINE_RAW_SPINLOCK(test_lock);

static void ref_lock_section(const int nloops)
{
        int i;

        preempt_disable();
        for (i = nloops; i >= 0; i--) {
                raw_spin_lock(&test_lock);
                raw_spin_unlock(&test_lock);
        }
        preempt_enable();
}

static void ref_lock_delay_section(const int nloops, const int udl, const int ndl)
{
        int i;

        preempt_disable();
        for (i = nloops; i >= 0; i--) {
                raw_spin_lock(&test_lock);
                un_delay(udl, ndl);
                raw_spin_unlock(&test_lock);
        }
        preempt_enable();
}

static const struct ref_scale_ops lock_ops = {
        .readsection    = ref_lock_section,
        .delaysection   = ref_lock_delay_section,
        .name           = "lock"
};

// Definitions for global irq-save spinlock

static void ref_lock_irq_section(const int nloops)
{
        unsigned long flags;
        int i;

        preempt_disable();
        for (i = nloops; i >= 0; i--) {
                raw_spin_lock_irqsave(&test_lock, flags);
                raw_spin_unlock_irqrestore(&test_lock, flags);
        }
        preempt_enable();
}

static void ref_lock_irq_delay_section(const int nloops, const int udl, const int ndl)
{
        unsigned long flags;
        int i;

        preempt_disable();
        for (i = nloops; i >= 0; i--) {
                raw_spin_lock_irqsave(&test_lock, flags);
                un_delay(udl, ndl);
                raw_spin_unlock_irqrestore(&test_lock, flags);
        }
        preempt_enable();
}

static const struct ref_scale_ops lock_irq_ops = {
        .readsection    = ref_lock_irq_section,
        .delaysection   = ref_lock_irq_delay_section,
        .name           = "lock-irq"
};

// Definitions for acquire-release.
static DEFINE_PER_CPU(unsigned long, test_acqrel);

static void ref_acqrel_section(const int nloops)
{
        unsigned long x;
        int i;

        preempt_disable();
        for (i = nloops; i >= 0; i--) {
                x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
                smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
        }
        preempt_enable();
}

static void ref_acqrel_delay_section(const int nloops, const int udl, const int ndl)
{
        unsigned long x;
        int i;

        preempt_disable();
        for (i = nloops; i >= 0; i--) {
                x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
                un_delay(udl, ndl);
                smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
        }
        preempt_enable();
}

static const struct ref_scale_ops acqrel_ops = {
        .readsection    = ref_acqrel_section,
        .delaysection   = ref_acqrel_delay_section,
        .name           = "acqrel"
};

static volatile u64 stopopts;

static void ref_clock_section(const int nloops)
{
        u64 x = 0;
        int i;

        preempt_disable();
        for (i = nloops; i >= 0; i--)
                x += ktime_get_real_fast_ns();
        preempt_enable();
        stopopts = x;
}

static void ref_clock_delay_section(const int nloops, const int udl, const int ndl)
{
        u64 x = 0;
        int i;

        preempt_disable();
        for (i = nloops; i >= 0; i--) {
                x += ktime_get_real_fast_ns();
                un_delay(udl, ndl);
        }
        preempt_enable();
        stopopts = x;
}

static const struct ref_scale_ops clock_ops = {
        .readsection    = ref_clock_section,
        .delaysection   = ref_clock_delay_section,
        .name           = "clock"
};

static void ref_jiffies_section(const int nloops)
{
        u64 x = 0;
        int i;

        preempt_disable();
        for (i = nloops; i >= 0; i--)
                x += jiffies;
        preempt_enable();
        stopopts = x;
}

static void ref_jiffies_delay_section(const int nloops, const int udl, const int ndl)
{
        u64 x = 0;
        int i;

        preempt_disable();
        for (i = nloops; i >= 0; i--) {
                x += jiffies;
                un_delay(udl, ndl);
        }
        preempt_enable();
        stopopts = x;
}

static const struct ref_scale_ops jiffies_ops = {
        .readsection    = ref_jiffies_section,
        .delaysection   = ref_jiffies_delay_section,
        .name           = "jiffies"
};

////////////////////////////////////////////////////////////////////////
//
// Methods leveraging SLAB_TYPESAFE_BY_RCU.
//
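
// SLAB_TYPESAFE_BY_RCU means that an object freed from this cache may be
// reallocated immediately, but the underlying slab memory keeps its type
// (it still holds a struct refscale_typesafe) for as long as RCU readers
// might hold a pointer into it.  Readers must therefore acquire their
// reference conditionally and then revalidate the object's identity,
// which is what the methods below do.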

// Item to look up in a typesafe manner.  Array of pointers to these.
struct refscale_typesafe {
        atomic_t rts_refctr;  // Used by all flavors
        spinlock_t rts_lock;
        seqlock_t rts_seqlock;
        unsigned int a;
        unsigned int b;
};

static struct kmem_cache *typesafe_kmem_cachep;
static struct refscale_typesafe **rtsarray;
static long rtsarray_size;
static DEFINE_TORTURE_RANDOM_PERCPU(refscale_rand);
static bool (*rts_acquire)(struct refscale_typesafe *rtsp, unsigned int *start);
static bool (*rts_release)(struct refscale_typesafe *rtsp, unsigned int start);

// Conditionally acquire an explicit in-structure reference count.
static bool typesafe_ref_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{
        return atomic_inc_not_zero(&rtsp->rts_refctr);
}

// Unconditionally release an explicit in-structure reference count.
static bool typesafe_ref_release(struct refscale_typesafe *rtsp, unsigned int start)
{
        if (!atomic_dec_return(&rtsp->rts_refctr)) {
                WRITE_ONCE(rtsp->a, rtsp->a + 1);
                kmem_cache_free(typesafe_kmem_cachep, rtsp);
        }
        return true;
}

// Unconditionally acquire an explicit in-structure spinlock.
static bool typesafe_lock_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{
        spin_lock(&rtsp->rts_lock);
        return true;
}

// Unconditionally release an explicit in-structure spinlock.
static bool typesafe_lock_release(struct refscale_typesafe *rtsp, unsigned int start)
{
        spin_unlock(&rtsp->rts_lock);
        return true;
}

// Unconditionally acquire an explicit in-structure sequence lock.
static bool typesafe_seqlock_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{
        *start = read_seqbegin(&rtsp->rts_seqlock);
        return true;
}

// Conditionally release an explicit in-structure sequence lock.  Return
// true if this release was successful, that is, if no retry is required.
static bool typesafe_seqlock_release(struct refscale_typesafe *rtsp, unsigned int start)
{
        return !read_seqretry(&rtsp->rts_seqlock, start);
}

// Do a read-side critical section with the specified delay in
// microseconds and nanoseconds inserted so as to increase probability
// of failure.
static void typesafe_delay_section(const int nloops, const int udl, const int ndl)
{
        unsigned int a;
        unsigned int b;
        int i;
        long idx;
        struct refscale_typesafe *rtsp;
        unsigned int start;

        for (i = nloops; i >= 0; i--) {
                preempt_disable();
                idx = torture_random(this_cpu_ptr(&refscale_rand)) % rtsarray_size;
                preempt_enable();
retry:
                rcu_read_lock();
                rtsp = rcu_dereference(rtsarray[idx]);
                a = READ_ONCE(rtsp->a);
                if (!rts_acquire(rtsp, &start)) {
                        rcu_read_unlock();
                        goto retry;
                }
                if (a != READ_ONCE(rtsp->a)) {
                        (void)rts_release(rtsp, start);
                        rcu_read_unlock();
                        goto retry;
                }
                un_delay(udl, ndl);
                b = READ_ONCE(rtsp->a);
                // Remember, seqlock read-side release can fail.
                if (!rts_release(rtsp, start)) {
                        rcu_read_unlock();
                        goto retry;
                }
                WARN_ONCE(a != b, "Re-read of ->a changed from %u to %u.\n", a, b);
                b = rtsp->b;
                rcu_read_unlock();
                WARN_ON_ONCE(a * a != b);
        }
}

// Because the acquisition and release methods are expensive, there
// is no point in optimizing away the un_delay() function's two checks.
// Thus simply define typesafe_read_section() as a simple wrapper around
// typesafe_delay_section().
static void typesafe_read_section(const int nloops)
{
        typesafe_delay_section(nloops, 0, 0);
}
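
// To summarize the identity checks above: typesafe_ref_release()
// increments ->a just before freeing an object, and typesafe_alloc_one()
// increments it again on reallocation.  A reader that captured a stale
// pointer thus sees ->a change and retries, and because ->b is kept
// equal to ->a squared, the final WARN_ON_ONCE(a * a != b) fires if an
// object ever changes identity undetected.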

// Allocate and initialize one refscale_typesafe structure.
static struct refscale_typesafe *typesafe_alloc_one(void)
{
        struct refscale_typesafe *rtsp;

        rtsp = kmem_cache_alloc(typesafe_kmem_cachep, GFP_KERNEL);
        if (!rtsp)
                return NULL;
        atomic_set(&rtsp->rts_refctr, 1);
        WRITE_ONCE(rtsp->a, rtsp->a + 1);
        WRITE_ONCE(rtsp->b, rtsp->a * rtsp->a);
        return rtsp;
}

// Slab-allocator constructor for refscale_typesafe structures created
// out of a new slab of system memory.
static void refscale_typesafe_ctor(void *rtsp_in)
{
        struct refscale_typesafe *rtsp = rtsp_in;

        spin_lock_init(&rtsp->rts_lock);
        seqlock_init(&rtsp->rts_seqlock);
        preempt_disable();
        rtsp->a = torture_random(this_cpu_ptr(&refscale_rand));
        preempt_enable();
}

static const struct ref_scale_ops typesafe_ref_ops;
static const struct ref_scale_ops typesafe_lock_ops;
static const struct ref_scale_ops typesafe_seqlock_ops;

// Initialize for a typesafe test.
static bool typesafe_init(void)
{
        long idx;
        long si = lookup_instances;

        typesafe_kmem_cachep = kmem_cache_create("refscale_typesafe",
                                                 sizeof(struct refscale_typesafe), sizeof(void *),
                                                 SLAB_TYPESAFE_BY_RCU, refscale_typesafe_ctor);
        if (!typesafe_kmem_cachep)
                return false;
        if (si < 0)
                si = -si * nr_cpu_ids;
        else if (si == 0)
                si = nr_cpu_ids;
        rtsarray_size = si;
        rtsarray = kcalloc(si, sizeof(*rtsarray), GFP_KERNEL);
        if (!rtsarray)
                return false;
        for (idx = 0; idx < rtsarray_size; idx++) {
                rtsarray[idx] = typesafe_alloc_one();
                if (!rtsarray[idx])
                        return false;
        }
        if (cur_ops == &typesafe_ref_ops) {
                rts_acquire = typesafe_ref_acquire;
                rts_release = typesafe_ref_release;
        } else if (cur_ops == &typesafe_lock_ops) {
                rts_acquire = typesafe_lock_acquire;
                rts_release = typesafe_lock_release;
        } else if (cur_ops == &typesafe_seqlock_ops) {
                rts_acquire = typesafe_seqlock_acquire;
                rts_release = typesafe_seqlock_release;
        } else {
                WARN_ON_ONCE(1);
                return false;
        }
        return true;
}

// Clean up after a typesafe test.
static void typesafe_cleanup(void)
{
        long idx;

        if (rtsarray) {
                for (idx = 0; idx < rtsarray_size; idx++)
                        kmem_cache_free(typesafe_kmem_cachep, rtsarray[idx]);
                kfree(rtsarray);
                rtsarray = NULL;
                rtsarray_size = 0;
        }
        kmem_cache_destroy(typesafe_kmem_cachep);
        typesafe_kmem_cachep = NULL;
        rts_acquire = NULL;
        rts_release = NULL;
}

// The typesafe_init() function distinguishes these structures by address.
static const struct ref_scale_ops typesafe_ref_ops = {
        .init           = typesafe_init,
        .cleanup        = typesafe_cleanup,
        .readsection    = typesafe_read_section,
        .delaysection   = typesafe_delay_section,
        .name           = "typesafe_ref"
};

static const struct ref_scale_ops typesafe_lock_ops = {
        .init           = typesafe_init,
        .cleanup        = typesafe_cleanup,
        .readsection    = typesafe_read_section,
        .delaysection   = typesafe_delay_section,
        .name           = "typesafe_lock"
};

static const struct ref_scale_ops typesafe_seqlock_ops = {
        .init           = typesafe_init,
        .cleanup        = typesafe_cleanup,
        .readsection    = typesafe_read_section,
        .delaysection   = typesafe_delay_section,
        .name           = "typesafe_seqlock"
};

static void rcu_scale_one_reader(void)
{
        if (readdelay <= 0)
                cur_ops->readsection(loops);
        else
                cur_ops->delaysection(loops, readdelay / 1000, readdelay % 1000);
}
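
// Each experiment's measurement protocol, as coordinated below: every
// reader checks in via n_started, runs cache-warming passes until all
// readers have checked in (n_warmedup), runs the single timed pass with
// interrupts disabled, and then runs maintain-load passes until all
// readers have finished timing (n_cooleddown).  Only the timed pass
// contributes to last_duration_ns.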

// Reader kthread.  Repeatedly does empty RCU read-side critical
// sections, minimizing update-side interference.
static int
ref_scale_reader(void *arg)
{
        unsigned long flags;
        long me = (long)arg;
        struct reader_task *rt = &(reader_tasks[me]);
        u64 start;
        s64 duration;

        VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me);
        WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)));
        set_user_nice(current, MAX_NICE);
        atomic_inc(&n_init);
        if (holdoff)
                schedule_timeout_interruptible(holdoff * HZ);
repeat:
        VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, raw_smp_processor_id());

        // Wait for signal that this reader can start.
        wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
                   torture_must_stop());

        if (torture_must_stop())
                goto end;

        // Make sure that the CPU is affinitized appropriately during testing.
        WARN_ON_ONCE(raw_smp_processor_id() != me);

        WRITE_ONCE(rt->start_reader, 0);
        if (!atomic_dec_return(&n_started))
                while (atomic_read_acquire(&n_started))
                        cpu_relax();

        VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d started", me, exp_idx);

        // To reduce noise, do an initial cache-warming invocation, check
        // in, and then keep warming until everyone has checked in.
        rcu_scale_one_reader();
        if (!atomic_dec_return(&n_warmedup))
                while (atomic_read_acquire(&n_warmedup))
                        rcu_scale_one_reader();
        // Also keep interrupts disabled.  This also has the effect
        // of preventing entries into slow path for rcu_read_unlock().
        local_irq_save(flags);
        start = ktime_get_mono_fast_ns();

        rcu_scale_one_reader();

        duration = ktime_get_mono_fast_ns() - start;
        local_irq_restore(flags);

        rt->last_duration_ns = WARN_ON_ONCE(duration < 0) ? 0 : duration;
        // To reduce runtime-skew noise, do maintain-load invocations until
        // everyone is done.
        if (!atomic_dec_return(&n_cooleddown))
                while (atomic_read_acquire(&n_cooleddown))
                        rcu_scale_one_reader();

        if (atomic_dec_and_test(&nreaders_exp))
                wake_up(&main_wq);

        VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d ended, (readers remaining=%d)",
                               me, exp_idx, atomic_read(&nreaders_exp));

        if (!torture_must_stop())
                goto repeat;
end:
        torture_kthread_stopping("ref_scale_reader");
        return 0;
}

static void reset_readers(void)
{
        int i;
        struct reader_task *rt;

        for (i = 0; i < nreaders; i++) {
                rt = &(reader_tasks[i]);

                rt->last_duration_ns = 0;
        }
}

// Print the results of each reader and return the sum of all their durations.
static u64 process_durations(int n)
{
        int i;
        struct reader_task *rt;
        struct seq_buf s;
        char *buf;
        u64 sum = 0;

        buf = kmalloc(800 + 64, GFP_KERNEL);
        if (!buf)
                return 0;
        seq_buf_init(&s, buf, 800 + 64);

        seq_buf_printf(&s, "Experiment #%d (Format: <THREAD-NUM>:<Total loop time in ns>)",
                       exp_idx);

        for (i = 0; i < n && !torture_must_stop(); i++) {
                rt = &(reader_tasks[i]);

                if (i % 5 == 0)
                        seq_buf_putc(&s, '\n');

                if (seq_buf_used(&s) >= 800) {
                        pr_alert("%s", seq_buf_str(&s));
                        seq_buf_clear(&s);
                }

                seq_buf_printf(&s, "%d: %llu\t", i, rt->last_duration_ns);

                sum += rt->last_duration_ns;
        }
        pr_alert("%s\n", seq_buf_str(&s));

        kfree(buf);
        return sum;
}
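
// For each experiment, main_func() below records 1000 times the average
// per-loop read-side overhead, that is,
// 1000 * sum(last_duration_ns) / (nreaders * loops), and the end-of-test
// report divides by 1000 again to print nanoseconds with three decimal
// places.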

// The main_func() kthread is the main orchestrator: it performs a set
// of experiments.  For every experiment, it orders all the readers
// involved to start and waits for them to finish the experiment.  It
// then reads their timestamps and starts the next experiment.  Each
// experiment progresses from 1 concurrent reader to N of them at which
// point all the timestamps are printed.
static int main_func(void *arg)
{
        int exp, r;
        char buf1[64];
        char *buf;
        u64 *result_avg;

        set_cpus_allowed_ptr(current, cpumask_of(nreaders % nr_cpu_ids));
        set_user_nice(current, MAX_NICE);

        VERBOSE_SCALEOUT("main_func task started");
        result_avg = kzalloc(nruns * sizeof(*result_avg), GFP_KERNEL);
        buf = kzalloc(800 + 64, GFP_KERNEL);
        if (!result_avg || !buf) {
                SCALEOUT_ERRSTRING("out of memory");
                goto oom_exit;
        }
        if (holdoff)
                schedule_timeout_interruptible(holdoff * HZ);

        // Wait for all threads to start.
        atomic_inc(&n_init);
        while (atomic_read(&n_init) < nreaders + 1)
                schedule_timeout_uninterruptible(1);

        // Start exp readers up per experiment
        for (exp = 0; exp < nruns && !torture_must_stop(); exp++) {
                if (torture_must_stop())
                        goto end;

                reset_readers();
                atomic_set(&nreaders_exp, nreaders);
                atomic_set(&n_started, nreaders);
                atomic_set(&n_warmedup, nreaders);
                atomic_set(&n_cooleddown, nreaders);

                exp_idx = exp;

                for (r = 0; r < nreaders; r++) {
                        smp_store_release(&reader_tasks[r].start_reader, 1);
                        wake_up(&reader_tasks[r].wq);
                }

                VERBOSE_SCALEOUT("main_func: experiment started, waiting for %d readers",
                                 nreaders);

                wait_event(main_wq,
                           !atomic_read(&nreaders_exp) || torture_must_stop());

                VERBOSE_SCALEOUT("main_func: experiment ended");

                if (torture_must_stop())
                        goto end;

                result_avg[exp] = div_u64(1000 * process_durations(nreaders), nreaders * loops);
        }

        // Print the average of all experiments
        SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");

        pr_alert("Runs\tTime(ns)\n");
        for (exp = 0; exp < nruns; exp++) {
                u64 avg;
                u32 rem;

                avg = div_u64_rem(result_avg[exp], 1000, &rem);
                sprintf(buf1, "%d\t%llu.%03u\n", exp + 1, avg, rem);
                strcat(buf, buf1);
                if (strlen(buf) >= 800) {
                        pr_alert("%s", buf);
                        buf[0] = 0;
                }
        }

        pr_alert("%s", buf);

oom_exit:
        // This will shut down everything including us.
        if (shutdown) {
                shutdown_start = 1;
                wake_up(&shutdown_wq);
        }

        // Wait for torture to stop us
        while (!torture_must_stop())
                schedule_timeout_uninterruptible(1);

end:
        torture_kthread_stopping("main_func");
        kfree(result_avg);
        kfree(buf);
        return 0;
}

static void
ref_scale_print_module_parms(const struct ref_scale_ops *cur_ops, const char *tag)
{
        pr_alert("%s" SCALE_FLAG
                 "--- %s: verbose=%d verbose_batched=%d shutdown=%d holdoff=%d lookup_instances=%ld loops=%ld nreaders=%d nruns=%d readdelay=%d\n", scale_type, tag,
                 verbose, verbose_batched, shutdown, holdoff, lookup_instances, loops, nreaders, nruns, readdelay);
}

static void
ref_scale_cleanup(void)
{
        int i;

        if (torture_cleanup_begin())
                return;

        if (!cur_ops) {
                torture_cleanup_end();
                return;
        }

        if (reader_tasks) {
                for (i = 0; i < nreaders; i++)
                        torture_stop_kthread("ref_scale_reader",
                                             reader_tasks[i].task);
        }
        kfree(reader_tasks);

        torture_stop_kthread("main_task", main_task);
        kfree(main_task);

        // Do scale-type-specific cleanup operations.
        if (cur_ops->cleanup != NULL)
                cur_ops->cleanup();

        torture_cleanup_end();
}

// Shutdown kthread.  Just waits to be awakened, then shuts down the system.
static int
ref_scale_shutdown(void *arg)
{
        wait_event_idle(shutdown_wq, shutdown_start);

        smp_mb(); // Wake before output.
        ref_scale_cleanup();
        kernel_power_off();

        return -EINVAL;
}
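
// Adding another mechanism to this comparison follows the existing
// pattern rather than any formal API: define readsection() and
// delaysection() functions like those above, wrap them in a
// ref_scale_ops structure with a distinct .name, and add that structure
// to the scale_ops[] array below so that it can be chosen via the
// scale_type module parameter.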

static int __init
ref_scale_init(void)
{
        long i;
        int firsterr = 0;
        static const struct ref_scale_ops *scale_ops[] = {
                &rcu_ops, &srcu_ops, RCU_TRACE_OPS RCU_TASKS_OPS &refcnt_ops, &rwlock_ops,
                &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops, &jiffies_ops,
                &typesafe_ref_ops, &typesafe_lock_ops, &typesafe_seqlock_ops,
        };

        if (!torture_init_begin(scale_type, verbose))
                return -EBUSY;

        for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
                cur_ops = scale_ops[i];
                if (strcmp(scale_type, cur_ops->name) == 0)
                        break;
        }
        if (i == ARRAY_SIZE(scale_ops)) {
                pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
                pr_alert("rcu-scale types:");
                for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
                        pr_cont(" %s", scale_ops[i]->name);
                pr_cont("\n");
                firsterr = -EINVAL;
                cur_ops = NULL;
                goto unwind;
        }
        if (cur_ops->init)
                if (!cur_ops->init()) {
                        firsterr = -EUCLEAN;
                        goto unwind;
                }

        ref_scale_print_module_parms(cur_ops, "Start of test");

        // Shutdown task
        if (shutdown) {
                init_waitqueue_head(&shutdown_wq);
                firsterr = torture_create_kthread(ref_scale_shutdown, NULL,
                                                  shutdown_task);
                if (torture_init_error(firsterr))
                        goto unwind;
                schedule_timeout_uninterruptible(1);
        }

        // Reader tasks (default to ~75% of online CPUs).
        if (nreaders < 0)
                nreaders = (num_online_cpus() >> 1) + (num_online_cpus() >> 2);
        if (WARN_ONCE(loops <= 0, "%s: loops = %ld, adjusted to 1\n", __func__, loops))
                loops = 1;
        if (WARN_ONCE(nreaders <= 0, "%s: nreaders = %d, adjusted to 1\n", __func__, nreaders))
                nreaders = 1;
        if (WARN_ONCE(nruns <= 0, "%s: nruns = %d, adjusted to 1\n", __func__, nruns))
                nruns = 1;
        reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]),
                               GFP_KERNEL);
        if (!reader_tasks) {
                SCALEOUT_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }

        VERBOSE_SCALEOUT("Starting %d reader threads", nreaders);

        for (i = 0; i < nreaders; i++) {
                init_waitqueue_head(&reader_tasks[i].wq);
                firsterr = torture_create_kthread(ref_scale_reader, (void *)i,
                                                  reader_tasks[i].task);
                if (torture_init_error(firsterr))
                        goto unwind;
        }

        // Main Task
        init_waitqueue_head(&main_wq);
        firsterr = torture_create_kthread(main_func, NULL, main_task);
        if (torture_init_error(firsterr))
                goto unwind;

        torture_init_end();
        return 0;

unwind:
        torture_init_end();
        ref_scale_cleanup();
        if (shutdown) {
                WARN_ON(!IS_MODULE(CONFIG_RCU_REF_SCALE_TEST));
                kernel_power_off();
        }
        return firsterr;
}

module_init(ref_scale_init);
module_exit(ref_scale_cleanup);