// SPDX-License-Identifier: GPL-2.0
/*
 * numa.c
 *
 * numa: Simulate NUMA-sensitive workloads and measure their NUMA performance
 */

#include <inttypes.h>

#include <subcmd/parse-options.h>
#include "../util/cloexec.h"

#include "bench.h"

#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <assert.h>
#include <debug.h>
#include <malloc.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <linux/kernel.h>
#include <linux/time64.h>
#include <linux/numa.h>
#include <linux/zalloc.h>

#include "../util/header.h"
#include "../util/mutex.h"
#include <numa.h>
#include <numaif.h>

#ifndef RUSAGE_THREAD
# define RUSAGE_THREAD 1
#endif

/*
 * Regular printout to the terminal, suppressed if -q is specified:
 */
#define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0)

/*
 * Debug printf:
 */
#undef dprintf
#define dprintf(x...) do { if (g && g->p.show_details >= 1) printf(x); } while (0)

struct thread_data {
	int			curr_cpu;
	cpu_set_t		*bind_cpumask;
	int			bind_node;
	u8			*process_data;
	int			process_nr;
	int			thread_nr;
	int			task_nr;
	unsigned int		loops_done;
	u64			val;
	u64			runtime_ns;
	u64			system_time_ns;
	u64			user_time_ns;
	double			speed_gbs;
	struct mutex		*process_lock;
};

/* Parameters set by options: */

struct params {
	/* Startup synchronization: */
	bool			serialize_startup;

	/* Task hierarchy: */
	int			nr_proc;
	int			nr_threads;

	/* Working set sizes: */
	const char		*mb_global_str;
	const char		*mb_proc_str;
	const char		*mb_proc_locked_str;
	const char		*mb_thread_str;

	double			mb_global;
	double			mb_proc;
	double			mb_proc_locked;
	double			mb_thread;

	/* Access patterns to the working set: */
	bool			data_reads;
	bool			data_writes;
	bool			data_backwards;
	bool			data_zero_memset;
	bool			data_rand_walk;
	u32			nr_loops;
	u32			nr_secs;
	u32			sleep_usecs;

	/* Working set initialization: */
	bool			init_zero;
	bool			init_random;
	bool			init_cpu0;

	/* Misc options: */
	int			show_details;
	int			run_all;
	int			thp;

	long			bytes_global;
	long			bytes_process;
	long			bytes_process_locked;
	long			bytes_thread;

	int			nr_tasks;

	bool			show_convergence;
	bool			measure_convergence;

	int			perturb_secs;
	int			nr_cpus;
	int			nr_nodes;

	/* Affinity options -C and -N: */
	char			*cpu_list_str;
	char			*node_list_str;
};


/* Global, read-writable area, accessible to all processes and threads: */

struct global_info {
	u8			*data;

	struct mutex		startup_mutex;
	struct cond		startup_cond;
	int			nr_tasks_started;

	struct mutex		start_work_mutex;
	struct cond		start_work_cond;
	int			nr_tasks_working;
	bool			start_work;

	struct mutex		stop_work_mutex;
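	/* stop_work_mutex serializes the final bytes_done accounting done by each exiting thread: */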
	u64			bytes_done;

	struct thread_data	*threads;

	/* Convergence latency measurement: */
	bool			all_converged;
	bool			stop_work;

	int			print_once;

	struct params		p;
};

static struct global_info	*g = NULL;

static int parse_cpus_opt(const struct option *opt, const char *arg, int unset);
static int parse_nodes_opt(const struct option *opt, const char *arg, int unset);

struct params p0;

static const struct option options[] = {
	OPT_INTEGER('p', "nr_proc"	, &p0.nr_proc,		"number of processes"),
	OPT_INTEGER('t', "nr_threads"	, &p0.nr_threads,	"number of threads per process"),

	OPT_STRING('G', "mb_global"	, &p0.mb_global_str,	"MB", "global memory (MBs)"),
	OPT_STRING('P', "mb_proc"	, &p0.mb_proc_str,	"MB", "process memory (MBs)"),
	OPT_STRING('L', "mb_proc_locked", &p0.mb_proc_locked_str,"MB", "process serialized/locked memory access (MBs), <= process_memory"),
	OPT_STRING('T', "mb_thread"	, &p0.mb_thread_str,	"MB", "thread memory (MBs)"),

	OPT_UINTEGER('l', "nr_loops"	, &p0.nr_loops,		"max number of loops to run (default: unlimited)"),
	OPT_UINTEGER('s', "nr_secs"	, &p0.nr_secs,		"max number of seconds to run (default: 5 secs)"),
	OPT_UINTEGER('u', "usleep"	, &p0.sleep_usecs,	"usecs to sleep per loop iteration"),

	OPT_BOOLEAN('R', "data_reads"	, &p0.data_reads,	"access the data via reads (can be mixed with -W)"),
	OPT_BOOLEAN('W', "data_writes"	, &p0.data_writes,	"access the data via writes (can be mixed with -R)"),
	OPT_BOOLEAN('B', "data_backwards", &p0.data_backwards,	"access the data backwards as well"),
	OPT_BOOLEAN('Z', "data_zero_memset", &p0.data_zero_memset,"access the data via glibc bzero only"),
	OPT_BOOLEAN('r', "data_rand_walk", &p0.data_rand_walk,	"access the data with random (32bit LFSR) walk"),


	OPT_BOOLEAN('z', "init_zero"	, &p0.init_zero,	"bzero the initial allocations"),
	OPT_BOOLEAN('I', "init_random"	, &p0.init_random,	"randomize the contents of the initial allocations"),
	OPT_BOOLEAN('0', "init_cpu0"	, &p0.init_cpu0,	"do the initial allocations on CPU#0"),
	OPT_INTEGER('x', "perturb_secs", &p0.perturb_secs,	"perturb thread 0/0 every X secs, to test convergence stability"),

	OPT_INCR   ('d', "show_details"	, &p0.show_details,	"Show details"),
	OPT_INCR   ('a', "all"		, &p0.run_all,		"Run all tests in the suite"),
	OPT_INTEGER('H', "thp"		, &p0.thp,		"MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"),
	OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details, "
		    "convergence is reached when each process (all its threads) is running on a single NUMA node."),
	OPT_BOOLEAN('m', "measure_convergence",	&p0.measure_convergence, "measure convergence latency"),
	OPT_BOOLEAN('q', "quiet"	, &quiet,
		    "quiet mode (do not show any warnings or messages)"),
	OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"),

	/* Special option string parsing callbacks: */
	OPT_CALLBACK('C', "cpus", NULL, "cpu[,cpu2,...cpuN]",
			"bind the first N tasks to these specific cpus (the rest is unbound)",
			parse_cpus_opt),
	OPT_CALLBACK('M', "memnodes", NULL, "node[,node2,...nodeN]",
			"bind the first N tasks to these specific memory nodes (the rest is unbound)",
			parse_nodes_opt),
	OPT_END()
};

static const char * const bench_numa_usage[] = {
	"perf bench numa <options>",
	NULL
};

static const char * const numa_usage[] = {
	"perf bench numa mem [<options>]",
	NULL
};

/*
 * Return the number of numa nodes present.
 */
static int nr_numa_nodes(void)
{
	int i, nr_nodes = 0;

	for (i = 0; i < g->p.nr_nodes; i++) {
		if (numa_bitmask_isbitset(numa_nodes_ptr, i))
			nr_nodes++;
	}

	return nr_nodes;
}

/*
 * Check whether the given numa node is present.
 */
static int is_node_present(int node)
{
	return numa_bitmask_isbitset(numa_nodes_ptr, node);
}

/*
 * Check whether the given numa node has any CPUs.
 */
static bool node_has_cpus(int node)
{
	struct bitmask *cpumask = numa_allocate_cpumask();
	bool ret = false; /* fall back to nocpus */
	int cpu;

	BUG_ON(!cpumask);
	if (!numa_node_to_cpus(node, cpumask)) {
		for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
			if (numa_bitmask_isbitset(cpumask, cpu)) {
				ret = true;
				break;
			}
		}
	}
	numa_free_cpumask(cpumask);

	return ret;
}

static cpu_set_t *bind_to_cpu(int target_cpu)
{
	int nrcpus = numa_num_possible_cpus();
	cpu_set_t *orig_mask, *mask;
	size_t size;

	orig_mask = CPU_ALLOC(nrcpus);
	BUG_ON(!orig_mask);
	size = CPU_ALLOC_SIZE(nrcpus);
	CPU_ZERO_S(size, orig_mask);

	if (sched_getaffinity(0, size, orig_mask))
		goto err_out;

	mask = CPU_ALLOC(nrcpus);
	if (!mask)
		goto err_out;

	CPU_ZERO_S(size, mask);

	if (target_cpu == -1) {
		int cpu;

		for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
			CPU_SET_S(cpu, size, mask);
	} else {
		if (target_cpu < 0 || target_cpu >= g->p.nr_cpus)
			goto err;

		CPU_SET_S(target_cpu, size, mask);
	}

	if (sched_setaffinity(0, size, mask))
		goto err;

	return orig_mask;

err:
	CPU_FREE(mask);
err_out:
	CPU_FREE(orig_mask);

	/* BUG_ON due to failure in allocation of orig_mask/mask */
	BUG_ON(-1);
	return NULL;
}

static cpu_set_t *bind_to_node(int target_node)
{
	int nrcpus = numa_num_possible_cpus();
	size_t size;
	cpu_set_t *orig_mask, *mask;
	int cpu;

	orig_mask = CPU_ALLOC(nrcpus);
	BUG_ON(!orig_mask);
	size = CPU_ALLOC_SIZE(nrcpus);
	CPU_ZERO_S(size, orig_mask);

	if (sched_getaffinity(0, size, orig_mask))
		goto err_out;

	mask = CPU_ALLOC(nrcpus);
	if (!mask)
		goto err_out;

	CPU_ZERO_S(size, mask);

	if (target_node == NUMA_NO_NODE) {
		for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
			CPU_SET_S(cpu, size, mask);
	} else {
		struct bitmask *cpumask = numa_allocate_cpumask();

		if (!cpumask)
			goto err;

		if (!numa_node_to_cpus(target_node, cpumask)) {
			for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
				if (numa_bitmask_isbitset(cpumask, cpu))
					CPU_SET_S(cpu, size, mask);
			}
		}
		numa_free_cpumask(cpumask);
	}

	if (sched_setaffinity(0, size, mask))
		goto err;

	return orig_mask;

err:
	CPU_FREE(mask);
err_out:
	CPU_FREE(orig_mask);

	/* BUG_ON due to failure in allocation of orig_mask/mask */
	BUG_ON(-1);
	return NULL;
}

static void bind_to_cpumask(cpu_set_t *mask)
{
	int ret;
	size_t size = CPU_ALLOC_SIZE(numa_num_possible_cpus());

	ret = sched_setaffinity(0, size, mask);
	if (ret) {
		CPU_FREE(mask);
		BUG_ON(ret);
	}
}

static void mempol_restore(void)
{
	int ret;

	ret = set_mempolicy(MPOL_DEFAULT, NULL, g->p.nr_nodes-1);

	BUG_ON(ret);
}

static void bind_to_memnode(int node)
{
	struct bitmask *node_mask;
	int ret;

	if (node == NUMA_NO_NODE)
		return;

	node_mask = numa_allocate_nodemask();
	BUG_ON(!node_mask);

	numa_bitmask_clearall(node_mask);
	numa_bitmask_setbit(node_mask, node);

	ret = set_mempolicy(MPOL_BIND, node_mask->maskp, node_mask->size + 1);
	dprintf("binding to node %d, mask: %016lx => %d\n", node, *node_mask->maskp, ret);

	numa_bitmask_free(node_mask);
	BUG_ON(ret);
}

#define HPSIZE (2*1024*1024)

#define set_taskname(fmt...)				\
do {							\
	char name[20];					\
							\
	snprintf(name, 20, fmt);			\
	prctl(PR_SET_NAME, name);			\
} while (0)

static u8 *alloc_data(ssize_t bytes0, int map_flags,
		      int init_zero, int init_cpu0, int thp, int init_random)
{
	cpu_set_t *orig_mask = NULL;
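	/* Saved affinity mask, CPU_ALLOC()'d by bind_to_node(); restored and freed below when init_cpu0 is set: */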
	ssize_t bytes;
	u8 *buf;
	int ret;

	if (!bytes0)
		return NULL;

	/* Allocate and initialize all memory on CPU#0: */
	if (init_cpu0) {
		int node = numa_node_of_cpu(0);

		orig_mask = bind_to_node(node);
		bind_to_memnode(node);
	}

	bytes = bytes0 + HPSIZE;

	buf = (void *)mmap(0, bytes, PROT_READ|PROT_WRITE, MAP_ANON|map_flags, -1, 0);
	BUG_ON(buf == (void *)-1);

	if (map_flags == MAP_PRIVATE) {
		if (thp > 0) {
			ret = madvise(buf, bytes, MADV_HUGEPAGE);
			if (ret && !g->print_once) {
				g->print_once = 1;
				printf("WARNING: Could not enable THP - do: 'echo madvise > /sys/kernel/mm/transparent_hugepage/enabled'\n");
			}
		}
		if (thp < 0) {
			ret = madvise(buf, bytes, MADV_NOHUGEPAGE);
			if (ret && !g->print_once) {
				g->print_once = 1;
				printf("WARNING: Could not disable THP: run a CONFIG_TRANSPARENT_HUGEPAGE kernel?\n");
			}
		}
	}

	if (init_zero) {
		bzero(buf, bytes);
	} else {
		/* Initialize random contents, different in each word: */
		if (init_random) {
			u64 *wbuf = (void *)buf;
			long off = rand();
			long i;

			for (i = 0; i < bytes/8; i++)
				wbuf[i] = i + off;
		}
	}

	/* Align to 2MB boundary: */
	buf = (void *)(((unsigned long)buf + HPSIZE-1) & ~(HPSIZE-1));

	/* Restore affinity: */
	if (init_cpu0) {
		bind_to_cpumask(orig_mask);
		CPU_FREE(orig_mask);
		mempol_restore();
	}

	return buf;
}

static void free_data(void *data, ssize_t bytes)
{
	int ret;

	if (!data)
		return;

	ret = munmap(data, bytes);
	BUG_ON(ret);
}

/*
 * Create a shared memory buffer that can be shared between processes, zeroed:
 */
static void * zalloc_shared_data(ssize_t bytes)
{
	return alloc_data(bytes, MAP_SHARED, 1, g->p.init_cpu0, g->p.thp, g->p.init_random);
}

/*
 * Create a shared memory buffer that can be shared between processes:
 */
static void * setup_shared_data(ssize_t bytes)
{
	return alloc_data(bytes, MAP_SHARED, 0, g->p.init_cpu0, g->p.thp, g->p.init_random);
}

/*
 * Allocate process-local memory - this will either be shared between
 * threads of this process, or only be accessed by this thread:
 */
static void * setup_private_data(ssize_t bytes)
{
	return alloc_data(bytes, MAP_PRIVATE, 0, g->p.init_cpu0, g->p.thp, g->p.init_random);
}

static int parse_cpu_list(const char *arg)
{
	p0.cpu_list_str = strdup(arg);

	dprintf("got CPU list: {%s}\n", p0.cpu_list_str);

	return 0;
}

static int parse_setup_cpu_list(void)
{
	struct thread_data *td;
	char *str0, *str;
	int t;

	if (!g->p.cpu_list_str)
		return 0;

	dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);

	str0 = str = strdup(g->p.cpu_list_str);
	t = 0;

	BUG_ON(!str);

	tprintf("# binding tasks to CPUs:\n");
	tprintf("# ");

	while (true) {
		int bind_cpu, bind_cpu_0, bind_cpu_1;
		char *tok, *tok_end, *tok_step, *tok_len, *tok_mul;
		int bind_len;
		int step;
		int mul;

		tok = strsep(&str, ",");
		if (!tok)
			break;

		tok_end = strstr(tok, "-");

		dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
		if (!tok_end) {
			/* Single CPU specified: */
			bind_cpu_0 = bind_cpu_1 = atol(tok);
		} else {
			/* CPU range specified (for example: "5-11"): */
			bind_cpu_0 = atol(tok);
			bind_cpu_1 = atol(tok_end + 1);
		}

		step = 1;
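		/* Optional "#<step>" suffix: e.g. "0-15#4" binds every 4th CPU of the range: */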
		tok_step = strstr(tok, "#");
		if (tok_step) {
			step = atol(tok_step + 1);
			BUG_ON(step <= 0 || step >= g->p.nr_cpus);
		}

		/*
		 * Mask length.
		 * Eg: "--cpus 8_4-16#4" means: '--cpus 8_4,12_4,16_4',
		 * where the _4 means the next 4 CPUs are allowed.
		 */
		bind_len = 1;
		tok_len = strstr(tok, "_");
		if (tok_len) {
			bind_len = atol(tok_len + 1);
			BUG_ON(bind_len <= 0 || bind_len > g->p.nr_cpus);
		}

		/* Multiplicator shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */
		mul = 1;
		tok_mul = strstr(tok, "x");
		if (tok_mul) {
			mul = atol(tok_mul + 1);
			BUG_ON(mul <= 0);
		}

		dprintf("CPUs: %d_%d-%d#%dx%d\n", bind_cpu_0, bind_len, bind_cpu_1, step, mul);

		if (bind_cpu_0 >= g->p.nr_cpus || bind_cpu_1 >= g->p.nr_cpus) {
			printf("\nTest not applicable, system has only %d CPUs.\n", g->p.nr_cpus);
			return -1;
		}

		if (is_cpu_online(bind_cpu_0) < 1 || is_cpu_online(bind_cpu_1) < 1) {
			printf("\nTest not applicable, bind_cpu_0 or bind_cpu_1 is offline\n");
			return -1;
		}

		BUG_ON(bind_cpu_0 < 0 || bind_cpu_1 < 0);
		BUG_ON(bind_cpu_0 > bind_cpu_1);

		for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) {
			size_t size = CPU_ALLOC_SIZE(g->p.nr_cpus);
			int i;

			for (i = 0; i < mul; i++) {
				int cpu;

				if (t >= g->p.nr_tasks) {
					printf("\n# NOTE: ignoring bind CPUs starting at CPU#%d\n #", bind_cpu);
					goto out;
				}
				td = g->threads + t;

				if (t)
					tprintf(",");
				if (bind_len > 1) {
					tprintf("%2d/%d", bind_cpu, bind_len);
				} else {
					tprintf("%2d", bind_cpu);
				}

				td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus);
				BUG_ON(!td->bind_cpumask);
				CPU_ZERO_S(size, td->bind_cpumask);
				for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
					if (cpu < 0 || cpu >= g->p.nr_cpus) {
						CPU_FREE(td->bind_cpumask);
						BUG_ON(-1);
					}
					CPU_SET_S(cpu, size, td->bind_cpumask);
				}
				t++;
			}
		}
	}
out:

	tprintf("\n");

	if (t < g->p.nr_tasks)
		printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t);

	free(str0);
	return 0;
}

static int parse_cpus_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	if (!arg)
		return -1;

	return parse_cpu_list(arg);
}

static int parse_node_list(const char *arg)
{
	p0.node_list_str = strdup(arg);

	dprintf("got NODE list: {%s}\n", p0.node_list_str);

	return 0;
}

static int parse_setup_node_list(void)
{
	struct thread_data *td;
	char *str0, *str;
	int t;

	if (!g->p.node_list_str)
		return 0;

	dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);

	str0 = str = strdup(g->p.node_list_str);
	t = 0;

	BUG_ON(!str);

	tprintf("# binding tasks to NODEs:\n");
	tprintf("# ");

	while (true) {
		int bind_node, bind_node_0, bind_node_1;
		char *tok, *tok_end, *tok_step, *tok_mul;
		int step;
		int mul;

		tok = strsep(&str, ",");
		if (!tok)
			break;

		tok_end = strstr(tok, "-");

		dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
		if (!tok_end) {
			/* Single NODE specified: */
			bind_node_0 = bind_node_1 = atol(tok);
		} else {
			/* NODE range specified (for example: "5-11"): */
			bind_node_0 = atol(tok);
			bind_node_1 = atol(tok_end + 1);
		}

		step = 1;
		tok_step = strstr(tok, "#");
		if (tok_step) {
			step = atol(tok_step + 1);
			BUG_ON(step <= 0 || step >= g->p.nr_nodes);
		}

		/* Multiplicator shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */
		mul = 1;
		tok_mul = strstr(tok, "x");
		if (tok_mul) {
			mul = atol(tok_mul + 1);
			BUG_ON(mul <= 0);
		}

		dprintf("NODEs: %d-%d #%d\n", bind_node_0, bind_node_1, step);

		if (bind_node_0 >= g->p.nr_nodes || bind_node_1 >= g->p.nr_nodes) {
			printf("\nTest not applicable, system has only %d nodes.\n", g->p.nr_nodes);
			return -1;
		}

		BUG_ON(bind_node_0 < 0 || bind_node_1 < 0);
		BUG_ON(bind_node_0 > bind_node_1);

		for (bind_node = bind_node_0; bind_node <= bind_node_1; bind_node += step) {
			int i;

			for (i = 0; i < mul; i++) {
				if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
					printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
					goto out;
				}
				td = g->threads + t;

				if (!t)
					tprintf(" %2d", bind_node);
				else
					tprintf(",%2d", bind_node);

				td->bind_node = bind_node;
				t++;
			}
		}
	}
out:

	tprintf("\n");

	if (t < g->p.nr_tasks)
		printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t);

	free(str0);
	return 0;
}

static int parse_nodes_opt(const struct option *opt __maybe_unused,
			   const char *arg, int unset __maybe_unused)
{
	if (!arg)
		return -1;

	return parse_node_list(arg);
}

static inline uint32_t lfsr_32(uint32_t lfsr)
{
	const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31);
	return (lfsr>>1) ^ ((0x0u - (lfsr & 0x1u)) & taps);
}

/*
 * Make sure there's real data dependency to RAM (when read
 * accesses are enabled), so the compiler, the CPU and the
 * kernel (KSM, zero page, etc.) cannot optimize away RAM
 * accesses:
 */
static inline u64 access_data(u64 *data, u64 val)
{
	if (g->p.data_reads)
		val += *data;
	if (g->p.data_writes)
		*data = val + 1;
	return val;
}

/*
 * The worker process does two types of work, a forwards going
 * loop and a backwards going loop.
 *
 * We do this so that on multiprocessor systems we do not create
 * a 'train' of processing, with highly synchronized processes,
 * skewing the whole benchmark.
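 *
 * With -B the direction is decided per task and per loop iteration
 * (the "(nr + loop) & 1" test below), so neighbouring tasks sweep
 * the buffer in opposite directions.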
 */
static u64 do_work(u8 *__data, long bytes, int nr, int nr_max, int loop, u64 val)
{
	long words = bytes/sizeof(u64);
	u64 *data = (void *)__data;
	long chunk_0, chunk_1;
	u64 *d0, *d, *d1;
	long off;
	long i;

	BUG_ON(!data && words);
	BUG_ON(data && !words);

	if (!data)
		return val;

	/* Very simple memset() work variant: */
	if (g->p.data_zero_memset && !g->p.data_rand_walk) {
		bzero(data, bytes);
		return val;
	}

	/* Spread out by PID/TID nr and by loop nr: */
	chunk_0 = words/nr_max;
	chunk_1 = words/g->p.nr_loops;
	off = nr*chunk_0 + loop*chunk_1;

	while (off >= words)
		off -= words;

	if (g->p.data_rand_walk) {
		u32 lfsr = nr + loop + val;
		long j;

		for (i = 0; i < words/1024; i++) {
			long start, end;

			lfsr = lfsr_32(lfsr);

			start = lfsr % words;
			end = min(start + 1024, words-1);

			if (g->p.data_zero_memset) {
				bzero(data + start, (end-start) * sizeof(u64));
			} else {
				for (j = start; j < end; j++)
					val = access_data(data + j, val);
			}
		}
	} else if (!g->p.data_backwards || (nr + loop) & 1) {
		/* Process data forwards: */

		d0 = data + off;
		d  = data + off + 1;
		d1 = data + words;

		for (;;) {
			if (unlikely(d >= d1))
				d = data;
			if (unlikely(d == d0))
				break;

			val = access_data(d, val);

			d++;
		}
	} else {
		/* Process data backwards: */

		d0 = data + off;
		d  = data + off - 1;
		d1 = data + words;

		for (;;) {
			if (unlikely(d < data))
				d = data + words-1;
			if (unlikely(d == d0))
				break;

			val = access_data(d, val);

			d--;
		}
	}

	return val;
}

static void update_curr_cpu(int task_nr, unsigned long bytes_worked)
{
	unsigned int cpu;

	cpu = sched_getcpu();
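	/*
	 * Note: the prctl(0, ...) call below uses an invalid option, so it is
	 * effectively a no-op; it seemingly only serves as a trace-visible
	 * marker that carries the amount of work done in its argument.
	 */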

	g->threads[task_nr].curr_cpu = cpu;
	prctl(0, bytes_worked);
}

/*
 * Count the number of nodes a process's threads
 * are spread out on.
 *
 * A count of 1 means that the process is compressed
 * to a single node. A count of g->p.nr_nodes means it's
 * spread out on the whole system.
 */
static int count_process_nodes(int process_nr)
{
	char *node_present;
	int nodes;
	int n, t;

	node_present = (char *)malloc(g->p.nr_nodes * sizeof(char));
	BUG_ON(!node_present);
	for (nodes = 0; nodes < g->p.nr_nodes; nodes++)
		node_present[nodes] = 0;

	for (t = 0; t < g->p.nr_threads; t++) {
		struct thread_data *td;
		int task_nr;
		int node;

		task_nr = process_nr*g->p.nr_threads + t;
		td = g->threads + task_nr;

		node = numa_node_of_cpu(td->curr_cpu);
		if (node < 0) /* curr_cpu was likely still -1 */ {
			free(node_present);
			return 0;
		}

		node_present[node] = 1;
	}

	nodes = 0;

	for (n = 0; n < g->p.nr_nodes; n++)
		nodes += node_present[n];

	free(node_present);
	return nodes;
}

/*
 * Count the number of distinct process-threads a node contains.
 *
 * A count of 1 means that the node contains only a single
 * process. If all nodes on the system contain at most one
 * process then we are well-converged.
 */
static int count_node_processes(int node)
{
	int processes = 0;
	int t, p;

	for (p = 0; p < g->p.nr_proc; p++) {
		for (t = 0; t < g->p.nr_threads; t++) {
			struct thread_data *td;
			int task_nr;
			int n;

			task_nr = p*g->p.nr_threads + t;
			td = g->threads + task_nr;

			n = numa_node_of_cpu(td->curr_cpu);
			if (n == node) {
				processes++;
				break;
			}
		}
	}

	return processes;
}

static void calc_convergence_compression(int *strong)
{
	unsigned int nodes_min, nodes_max;
	int p;

	nodes_min = -1;
	nodes_max =  0;

	for (p = 0; p < g->p.nr_proc; p++) {
		unsigned int nodes = count_process_nodes(p);

		if (!nodes) {
			*strong = 0;
			return;
		}

		nodes_min = min(nodes, nodes_min);
		nodes_max = max(nodes, nodes_max);
	}

	/* Strong convergence: all threads compress on a single node: */
	if (nodes_min == 1 && nodes_max == 1) {
		*strong = 1;
	} else {
		*strong = 0;
		tprintf(" {%d-%d}", nodes_min, nodes_max);
	}
}

static void calc_convergence(double runtime_ns_max, double *convergence)
{
	unsigned int loops_done_min, loops_done_max;
	int process_groups;
	int *nodes;
	int distance;
	int nr_min;
	int nr_max;
	int strong;
	int sum;
	int nr;
	int node;
	int cpu;
	int t;

	if (!g->p.show_convergence && !g->p.measure_convergence)
		return;

	nodes = (int *)malloc(g->p.nr_nodes * sizeof(int));
	BUG_ON(!nodes);
	for (node = 0; node < g->p.nr_nodes; node++)
		nodes[node] = 0;

	loops_done_min = -1;
	loops_done_max = 0;

	for (t = 0; t < g->p.nr_tasks; t++) {
		struct thread_data *td = g->threads + t;
		unsigned int loops_done;

		cpu = td->curr_cpu;

		/* Not all threads have written it yet: */
		if (cpu < 0)
			continue;

		node = numa_node_of_cpu(cpu);

		nodes[node]++;

		loops_done = td->loops_done;
		loops_done_min = min(loops_done, loops_done_min);
		loops_done_max = max(loops_done, loops_done_max);
	}

	nr_max = 0;
	nr_min = g->p.nr_tasks;
	sum = 0;

	for (node = 0; node < g->p.nr_nodes; node++) {
		if (!is_node_present(node))
			continue;
		nr = nodes[node];
		nr_min = min(nr, nr_min);
		nr_max = max(nr, nr_max);
		sum += nr;
	}
	BUG_ON(nr_min > nr_max);

	BUG_ON(sum > g->p.nr_tasks);

	if (0 && (sum < g->p.nr_tasks)) {
		free(nodes);
		return;
	}

	/*
	 * Count the number of distinct process groups present
	 * on nodes - when we are converged this will decrease
	 * to g->p.nr_proc:
	 */
	process_groups = 0;

	for (node = 0; node < g->p.nr_nodes; node++) {
		int processes;

		if (!is_node_present(node))
			continue;
		processes = count_node_processes(node);
		nr = nodes[node];
		tprintf(" %2d/%-2d", nr, processes);

		process_groups += processes;
	}

	distance = nr_max - nr_min;

	tprintf(" [%2d/%-2d]", distance, process_groups);

	tprintf(" l:%3d-%-3d (%3d)",
		loops_done_min, loops_done_max, loops_done_max-loops_done_min);

	if (loops_done_min && loops_done_max) {
		double skew = 1.0 - (double)loops_done_min/loops_done_max;

		tprintf(" [%4.1f%%]", skew * 100.0);
	}

	calc_convergence_compression(&strong);

	if (strong && process_groups == g->p.nr_proc) {
		if (!*convergence) {
			*convergence = runtime_ns_max;
			tprintf(" (%6.1fs converged)\n", *convergence / NSEC_PER_SEC);
			if (g->p.measure_convergence) {
				g->all_converged = true;
				g->stop_work = true;
			}
		}
	} else {
		if (*convergence) {
			tprintf(" (%6.1fs de-converged)", runtime_ns_max / NSEC_PER_SEC);
			*convergence = 0;
		}
		tprintf("\n");
	}

	free(nodes);
}

static void show_summary(double runtime_ns_max, int l, double *convergence)
{
	tprintf("\r # %5.1f%% [%.1f mins]",
		(double)(l+1)/g->p.nr_loops*100.0, runtime_ns_max / NSEC_PER_SEC / 60.0);

	calc_convergence(runtime_ns_max, convergence);

	if (g->p.show_details >= 0)
		fflush(stdout);
}

static void *worker_thread(void *__tdata)
{
	struct thread_data *td = __tdata;
	struct timeval start0, start, stop, diff;
	int process_nr = td->process_nr;
	int thread_nr = td->thread_nr;
	unsigned long last_perturbance;
	int task_nr = td->task_nr;
	int details = g->p.show_details;
	int first_task, last_task;
	double convergence = 0;
	u64 val = td->val;
	double runtime_ns_max;
	u8 *global_data;
	u8 *process_data;
	u8 *thread_data;
	u64 bytes_done, secs;
	long work_done;
	u32 l;
	struct rusage rusage;

	bind_to_cpumask(td->bind_cpumask);
	bind_to_memnode(td->bind_node);

	set_taskname("thread %d/%d", process_nr, thread_nr);

	global_data = g->data;
	process_data = td->process_data;
	thread_data = setup_private_data(g->p.bytes_thread);

	bytes_done = 0;

	last_task = 0;
	if (process_nr == g->p.nr_proc-1 && thread_nr == g->p.nr_threads-1)
		last_task = 1;

	first_task = 0;
	if (process_nr == 0 && thread_nr == 0)
		first_task = 1;

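	/*
	 * Only the first task injects -x perturbations and only the last
	 * task updates the periodic summary in the main loop below.
	 */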
	if (details >= 2) {
		printf("# thread %2d / %2d global mem: %p, process mem: %p, thread mem: %p\n",
			process_nr, thread_nr, global_data, process_data, thread_data);
	}

	if (g->p.serialize_startup) {
		mutex_lock(&g->startup_mutex);
		g->nr_tasks_started++;
		/* The last thread wakes the main process. */
		if (g->nr_tasks_started == g->p.nr_tasks)
			cond_signal(&g->startup_cond);

		mutex_unlock(&g->startup_mutex);

		/* Here we will wait for the main process to start us all at once: */
		mutex_lock(&g->start_work_mutex);
		g->start_work = false;
		g->nr_tasks_working++;
		while (!g->start_work)
			cond_wait(&g->start_work_cond, &g->start_work_mutex);

		mutex_unlock(&g->start_work_mutex);
	}

	gettimeofday(&start0, NULL);

	start = stop = start0;
	last_perturbance = start.tv_sec;

	for (l = 0; l < g->p.nr_loops; l++) {
		start = stop;

		if (g->stop_work)
			break;

		val += do_work(global_data, g->p.bytes_global, process_nr, g->p.nr_proc, l, val);
		val += do_work(process_data, g->p.bytes_process, thread_nr, g->p.nr_threads, l, val);
		val += do_work(thread_data, g->p.bytes_thread, 0, 1, l, val);

		if (g->p.sleep_usecs) {
			mutex_lock(td->process_lock);
			usleep(g->p.sleep_usecs);
			mutex_unlock(td->process_lock);
		}
		/*
		 * Amount of work to be done under a process-global lock:
		 */
		if (g->p.bytes_process_locked) {
			mutex_lock(td->process_lock);
			val += do_work(process_data, g->p.bytes_process_locked, thread_nr, g->p.nr_threads, l, val);
			mutex_unlock(td->process_lock);
		}

		work_done = g->p.bytes_global + g->p.bytes_process +
			    g->p.bytes_process_locked + g->p.bytes_thread;

		update_curr_cpu(task_nr, work_done);
		bytes_done += work_done;

		if (details < 0 && !g->p.perturb_secs && !g->p.measure_convergence && !g->p.nr_secs)
			continue;

		td->loops_done = l;

		gettimeofday(&stop, NULL);

		/* Check whether our max runtime timed out: */
		if (g->p.nr_secs) {
			timersub(&stop, &start0, &diff);
			if ((u32)diff.tv_sec >= g->p.nr_secs) {
				g->stop_work = true;
				break;
			}
		}

		/* Update the summary at most once per second: */
		if (start.tv_sec == stop.tv_sec)
			continue;

		/*
		 * Perturb the first task's equilibrium every g->p.perturb_secs seconds,
		 * by migrating to CPU#0:
		 */
		if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) {
			cpu_set_t *orig_mask;
			int target_cpu;
			int this_cpu;

			last_perturbance = stop.tv_sec;

			/*
			 * Depending on where we are running, move into
			 * the other half of the system, to create some
			 * real disturbance:
			 */
			this_cpu = g->threads[task_nr].curr_cpu;
			if (this_cpu < g->p.nr_cpus/2)
				target_cpu = g->p.nr_cpus-1;
			else
				target_cpu = 0;

			orig_mask = bind_to_cpu(target_cpu);

			/* Here we are running on the target CPU already */
			if (details >= 1)
				printf(" (injecting perturbalance, moved to CPU#%d)\n", target_cpu);

			bind_to_cpumask(orig_mask);
			CPU_FREE(orig_mask);
		}

		if (details >= 3) {
			timersub(&stop, &start, &diff);
			runtime_ns_max = diff.tv_sec * NSEC_PER_SEC;
			runtime_ns_max += diff.tv_usec * NSEC_PER_USEC;

			if (details >= 0) {
				printf(" #%2d / %2d: %14.2lf nsecs/op [val: %016"PRIx64"]\n",
					process_nr, thread_nr, runtime_ns_max / bytes_done, val);
			}
			fflush(stdout);
		}
		if (!last_task)
			continue;

		timersub(&stop, &start0, &diff);
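		/* Total runtime so far, measured from start0: */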
NSEC_PER_SEC; 1320 runtime_ns_max += diff.tv_use 1286 runtime_ns_max += diff.tv_usec * NSEC_PER_USEC; 1321 1287 1322 show_summary(runtime_ns_max, 1288 show_summary(runtime_ns_max, l, &convergence); 1323 } 1289 } 1324 1290 1325 gettimeofday(&stop, NULL); 1291 gettimeofday(&stop, NULL); 1326 timersub(&stop, &start0, &diff); 1292 timersub(&stop, &start0, &diff); 1327 td->runtime_ns = diff.tv_sec * NSEC_P 1293 td->runtime_ns = diff.tv_sec * NSEC_PER_SEC; 1328 td->runtime_ns += diff.tv_usec * NSEC 1294 td->runtime_ns += diff.tv_usec * NSEC_PER_USEC; 1329 secs = td->runtime_ns / NSEC_PER_SEC; 1295 secs = td->runtime_ns / NSEC_PER_SEC; 1330 td->speed_gbs = secs ? bytes_done / s 1296 td->speed_gbs = secs ? bytes_done / secs / 1e9 : 0; 1331 1297 1332 getrusage(RUSAGE_THREAD, &rusage); 1298 getrusage(RUSAGE_THREAD, &rusage); 1333 td->system_time_ns = rusage.ru_stime. 1299 td->system_time_ns = rusage.ru_stime.tv_sec * NSEC_PER_SEC; 1334 td->system_time_ns += rusage.ru_stime 1300 td->system_time_ns += rusage.ru_stime.tv_usec * NSEC_PER_USEC; 1335 td->user_time_ns = rusage.ru_utime.tv 1301 td->user_time_ns = rusage.ru_utime.tv_sec * NSEC_PER_SEC; 1336 td->user_time_ns += rusage.ru_utime.t 1302 td->user_time_ns += rusage.ru_utime.tv_usec * NSEC_PER_USEC; 1337 1303 1338 free_data(thread_data, g->p.bytes_thr 1304 free_data(thread_data, g->p.bytes_thread); 1339 1305 1340 mutex_lock(&g->stop_work_mutex); !! 1306 pthread_mutex_lock(&g->stop_work_mutex); 1341 g->bytes_done += bytes_done; 1307 g->bytes_done += bytes_done; 1342 mutex_unlock(&g->stop_work_mutex); !! 1308 pthread_mutex_unlock(&g->stop_work_mutex); 1343 1309 1344 return NULL; 1310 return NULL; 1345 } 1311 } 1346 1312 1347 /* 1313 /* 1348 * A worker process starts a couple of thread 1314 * A worker process starts a couple of threads: 1349 */ 1315 */ 1350 static void worker_process(int process_nr) 1316 static void worker_process(int process_nr) 1351 { 1317 { 1352 struct mutex process_lock; !! 1318 pthread_mutex_t process_lock; 1353 struct thread_data *td; 1319 struct thread_data *td; 1354 pthread_t *pthreads; 1320 pthread_t *pthreads; 1355 u8 *process_data; 1321 u8 *process_data; 1356 int task_nr; 1322 int task_nr; 1357 int ret; 1323 int ret; 1358 int t; 1324 int t; 1359 1325 1360 mutex_init(&process_lock); !! 
1326 pthread_mutex_init(&process_lock, NULL); 1361 set_taskname("process %d", process_nr 1327 set_taskname("process %d", process_nr); 1362 1328 1363 /* 1329 /* 1364 * Pick up the memory policy and the 1330 * Pick up the memory policy and the CPU binding of our first thread, 1365 * so that we initialize memory accor 1331 * so that we initialize memory accordingly: 1366 */ 1332 */ 1367 task_nr = process_nr*g->p.nr_threads; 1333 task_nr = process_nr*g->p.nr_threads; 1368 td = g->threads + task_nr; 1334 td = g->threads + task_nr; 1369 1335 1370 bind_to_memnode(td->bind_node); 1336 bind_to_memnode(td->bind_node); 1371 bind_to_cpumask(td->bind_cpumask); 1337 bind_to_cpumask(td->bind_cpumask); 1372 1338 1373 pthreads = zalloc(g->p.nr_threads * s 1339 pthreads = zalloc(g->p.nr_threads * sizeof(pthread_t)); 1374 process_data = setup_private_data(g-> 1340 process_data = setup_private_data(g->p.bytes_process); 1375 1341 1376 if (g->p.show_details >= 3) { 1342 if (g->p.show_details >= 3) { 1377 printf(" # process %2d global 1343 printf(" # process %2d global mem: %p, process mem: %p\n", 1378 process_nr, g->data, 1344 process_nr, g->data, process_data); 1379 } 1345 } 1380 1346 1381 for (t = 0; t < g->p.nr_threads; t++) 1347 for (t = 0; t < g->p.nr_threads; t++) { 1382 task_nr = process_nr*g->p.nr_ 1348 task_nr = process_nr*g->p.nr_threads + t; 1383 td = g->threads + task_nr; 1349 td = g->threads + task_nr; 1384 1350 1385 td->process_data = process_da 1351 td->process_data = process_data; 1386 td->process_nr = process_nr 1352 td->process_nr = process_nr; 1387 td->thread_nr = t; 1353 td->thread_nr = t; 1388 td->task_nr = task_nr; 1354 td->task_nr = task_nr; 1389 td->val = rand(); 1355 td->val = rand(); 1390 td->curr_cpu = -1; 1356 td->curr_cpu = -1; 1391 td->process_lock = &process_l 1357 td->process_lock = &process_lock; 1392 1358 1393 ret = pthread_create(pthreads 1359 ret = pthread_create(pthreads + t, NULL, worker_thread, td); 1394 BUG_ON(ret); 1360 BUG_ON(ret); 1395 } 1361 } 1396 1362 1397 for (t = 0; t < g->p.nr_threads; t++) 1363 for (t = 0; t < g->p.nr_threads; t++) { 1398 ret = pthread_join(pthreads[t 1364 ret = pthread_join(pthreads[t], NULL); 1399 BUG_ON(ret); 1365 BUG_ON(ret); 1400 } 1366 } 1401 1367 1402 free_data(process_data, g->p.bytes_pr 1368 free_data(process_data, g->p.bytes_process); 1403 free(pthreads); 1369 free(pthreads); 1404 } 1370 } 1405 1371 1406 static void print_summary(void) 1372 static void print_summary(void) 1407 { 1373 { 1408 if (g->p.show_details < 0) 1374 if (g->p.show_details < 0) 1409 return; 1375 return; 1410 1376 1411 printf("\n ###\n"); 1377 printf("\n ###\n"); 1412 printf(" # %d %s will execute (on %d 1378 printf(" # %d %s will execute (on %d nodes, %d CPUs):\n", 1413 g->p.nr_tasks, g->p.nr_tasks 1379 g->p.nr_tasks, g->p.nr_tasks == 1 ? 
"task" : "tasks", nr_numa_nodes(), g->p.nr_cpus); 1414 printf(" # %5dx %5ldMB global s 1380 printf(" # %5dx %5ldMB global shared mem operations\n", 1415 g->p.nr_loops, g->p.b 1381 g->p.nr_loops, g->p.bytes_global/1024/1024); 1416 printf(" # %5dx %5ldMB process s 1382 printf(" # %5dx %5ldMB process shared mem operations\n", 1417 g->p.nr_loops, g->p.b 1383 g->p.nr_loops, g->p.bytes_process/1024/1024); 1418 printf(" # %5dx %5ldMB thread l 1384 printf(" # %5dx %5ldMB thread local mem operations\n", 1419 g->p.nr_loops, g->p.b 1385 g->p.nr_loops, g->p.bytes_thread/1024/1024); 1420 1386 1421 printf(" ###\n"); 1387 printf(" ###\n"); 1422 1388 1423 printf("\n ###\n"); fflush(stdout); 1389 printf("\n ###\n"); fflush(stdout); 1424 } 1390 } 1425 1391 1426 static void init_thread_data(void) 1392 static void init_thread_data(void) 1427 { 1393 { 1428 ssize_t size = sizeof(*g->threads)*g- 1394 ssize_t size = sizeof(*g->threads)*g->p.nr_tasks; 1429 int t; 1395 int t; 1430 1396 1431 g->threads = zalloc_shared_data(size) 1397 g->threads = zalloc_shared_data(size); 1432 1398 1433 for (t = 0; t < g->p.nr_tasks; t++) { 1399 for (t = 0; t < g->p.nr_tasks; t++) { 1434 struct thread_data *td = g->t 1400 struct thread_data *td = g->threads + t; 1435 size_t cpuset_size = CPU_ALLO << 1436 int cpu; 1401 int cpu; 1437 1402 1438 /* Allow all nodes by default 1403 /* Allow all nodes by default: */ 1439 td->bind_node = NUMA_NO_NODE; 1404 td->bind_node = NUMA_NO_NODE; 1440 1405 1441 /* Allow all CPUs by default: 1406 /* Allow all CPUs by default: */ 1442 td->bind_cpumask = CPU_ALLOC( !! 1407 CPU_ZERO(&td->bind_cpumask); 1443 BUG_ON(!td->bind_cpumask); << 1444 CPU_ZERO_S(cpuset_size, td->b << 1445 for (cpu = 0; cpu < g->p.nr_c 1408 for (cpu = 0; cpu < g->p.nr_cpus; cpu++) 1446 CPU_SET_S(cpu, cpuset !! 1409 CPU_SET(cpu, &td->bind_cpumask); 1447 } 1410 } 1448 } 1411 } 1449 1412 1450 static void deinit_thread_data(void) 1413 static void deinit_thread_data(void) 1451 { 1414 { 1452 ssize_t size = sizeof(*g->threads)*g- 1415 ssize_t size = sizeof(*g->threads)*g->p.nr_tasks; 1453 int t; << 1454 << 1455 /* Free the bind_cpumask allocated fo << 1456 for (t = 0; t < g->p.nr_tasks; t++) { << 1457 struct thread_data *td = g->t << 1458 CPU_FREE(td->bind_cpumask); << 1459 } << 1460 1416 1461 free_data(g->threads, size); 1417 free_data(g->threads, size); 1462 } 1418 } 1463 1419 1464 static int init(void) 1420 static int init(void) 1465 { 1421 { 1466 g = (void *)alloc_data(sizeof(*g), MA 1422 g = (void *)alloc_data(sizeof(*g), MAP_SHARED, 1, 0, 0 /* THP */, 0); 1467 1423 1468 /* Copy over options: */ 1424 /* Copy over options: */ 1469 g->p = p0; 1425 g->p = p0; 1470 1426 1471 g->p.nr_cpus = numa_num_configured_cp 1427 g->p.nr_cpus = numa_num_configured_cpus(); 1472 1428 1473 g->p.nr_nodes = numa_max_node() + 1; 1429 g->p.nr_nodes = numa_max_node() + 1; 1474 1430 1475 /* char array in count_process_nodes( 1431 /* char array in count_process_nodes(): */ 1476 BUG_ON(g->p.nr_nodes < 0); 1432 BUG_ON(g->p.nr_nodes < 0); 1477 1433 1478 if (quiet && !g->p.show_details) !! 
1434 if (g->p.show_quiet && !g->p.show_details) 1479 g->p.show_details = -1; 1435 g->p.show_details = -1; 1480 1436 1481 /* Some memory should be specified: * 1437 /* Some memory should be specified: */ 1482 if (!g->p.mb_global_str && !g->p.mb_p 1438 if (!g->p.mb_global_str && !g->p.mb_proc_str && !g->p.mb_thread_str) 1483 return -1; 1439 return -1; 1484 1440 1485 if (g->p.mb_global_str) { 1441 if (g->p.mb_global_str) { 1486 g->p.mb_global = atof(g->p.mb 1442 g->p.mb_global = atof(g->p.mb_global_str); 1487 BUG_ON(g->p.mb_global < 0); 1443 BUG_ON(g->p.mb_global < 0); 1488 } 1444 } 1489 1445 1490 if (g->p.mb_proc_str) { 1446 if (g->p.mb_proc_str) { 1491 g->p.mb_proc = atof(g->p.mb_p 1447 g->p.mb_proc = atof(g->p.mb_proc_str); 1492 BUG_ON(g->p.mb_proc < 0); 1448 BUG_ON(g->p.mb_proc < 0); 1493 } 1449 } 1494 1450 1495 if (g->p.mb_proc_locked_str) { 1451 if (g->p.mb_proc_locked_str) { 1496 g->p.mb_proc_locked = atof(g- 1452 g->p.mb_proc_locked = atof(g->p.mb_proc_locked_str); 1497 BUG_ON(g->p.mb_proc_locked < 1453 BUG_ON(g->p.mb_proc_locked < 0); 1498 BUG_ON(g->p.mb_proc_locked > 1454 BUG_ON(g->p.mb_proc_locked > g->p.mb_proc); 1499 } 1455 } 1500 1456 1501 if (g->p.mb_thread_str) { 1457 if (g->p.mb_thread_str) { 1502 g->p.mb_thread = atof(g->p.mb 1458 g->p.mb_thread = atof(g->p.mb_thread_str); 1503 BUG_ON(g->p.mb_thread < 0); 1459 BUG_ON(g->p.mb_thread < 0); 1504 } 1460 } 1505 1461 1506 BUG_ON(g->p.nr_threads <= 0); 1462 BUG_ON(g->p.nr_threads <= 0); 1507 BUG_ON(g->p.nr_proc <= 0); 1463 BUG_ON(g->p.nr_proc <= 0); 1508 1464 1509 g->p.nr_tasks = g->p.nr_proc*g->p.nr_ 1465 g->p.nr_tasks = g->p.nr_proc*g->p.nr_threads; 1510 1466 1511 g->p.bytes_global = g-> 1467 g->p.bytes_global = g->p.mb_global *1024L*1024L; 1512 g->p.bytes_process = g-> 1468 g->p.bytes_process = g->p.mb_proc *1024L*1024L; 1513 g->p.bytes_process_locked = g-> 1469 g->p.bytes_process_locked = g->p.mb_proc_locked *1024L*1024L; 1514 g->p.bytes_thread = g-> 1470 g->p.bytes_thread = g->p.mb_thread *1024L*1024L; 1515 1471 1516 g->data = setup_shared_data(g->p.byte 1472 g->data = setup_shared_data(g->p.bytes_global); 1517 1473 1518 /* Startup serialization: */ 1474 /* Startup serialization: */ 1519 mutex_init_pshared(&g->start_work_mut !! 1475 init_global_mutex(&g->start_work_mutex); 1520 cond_init_pshared(&g->start_work_cond !! 1476 init_global_cond(&g->start_work_cond); 1521 mutex_init_pshared(&g->startup_mutex) !! 1477 init_global_mutex(&g->startup_mutex); 1522 cond_init_pshared(&g->startup_cond); !! 1478 init_global_cond(&g->startup_cond); 1523 mutex_init_pshared(&g->stop_work_mute !! 
1479 init_global_mutex(&g->stop_work_mutex); 1524 1480 1525 init_thread_data(); 1481 init_thread_data(); 1526 1482 1527 tprintf("#\n"); 1483 tprintf("#\n"); 1528 if (parse_setup_cpu_list() || parse_s 1484 if (parse_setup_cpu_list() || parse_setup_node_list()) 1529 return -1; 1485 return -1; 1530 tprintf("#\n"); 1486 tprintf("#\n"); 1531 1487 1532 print_summary(); 1488 print_summary(); 1533 1489 1534 return 0; 1490 return 0; 1535 } 1491 } 1536 1492 1537 static void deinit(void) 1493 static void deinit(void) 1538 { 1494 { 1539 free_data(g->data, g->p.bytes_global) 1495 free_data(g->data, g->p.bytes_global); 1540 g->data = NULL; 1496 g->data = NULL; 1541 1497 1542 deinit_thread_data(); 1498 deinit_thread_data(); 1543 1499 1544 free_data(g, sizeof(*g)); 1500 free_data(g, sizeof(*g)); 1545 g = NULL; 1501 g = NULL; 1546 } 1502 } 1547 1503 1548 /* 1504 /* 1549 * Print a short or long result, depending on 1505 * Print a short or long result, depending on the verbosity setting: 1550 */ 1506 */ 1551 static void print_res(const char *name, doubl 1507 static void print_res(const char *name, double val, 1552 const char *txt_unit, c 1508 const char *txt_unit, const char *txt_short, const char *txt_long) 1553 { 1509 { 1554 if (!name) 1510 if (!name) 1555 name = "main,"; 1511 name = "main,"; 1556 1512 1557 if (!quiet) !! 1513 if (!g->p.show_quiet) 1558 printf(" %-30s %15.3f, %-15s 1514 printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short); 1559 else 1515 else 1560 printf(" %14.3f %s\n", val, t 1516 printf(" %14.3f %s\n", val, txt_long); 1561 } 1517 } 1562 1518 1563 static int __bench_numa(const char *name) 1519 static int __bench_numa(const char *name) 1564 { 1520 { 1565 struct timeval start, stop, diff; 1521 struct timeval start, stop, diff; 1566 u64 runtime_ns_min, runtime_ns_sum; 1522 u64 runtime_ns_min, runtime_ns_sum; 1567 pid_t *pids, pid, wpid; 1523 pid_t *pids, pid, wpid; 1568 double delta_runtime; 1524 double delta_runtime; 1569 double runtime_avg; 1525 double runtime_avg; 1570 double runtime_sec_max; 1526 double runtime_sec_max; 1571 double runtime_sec_min; 1527 double runtime_sec_min; 1572 int wait_stat; 1528 int wait_stat; 1573 double bytes; 1529 double bytes; 1574 int i, t, p; 1530 int i, t, p; 1575 1531 1576 if (init()) 1532 if (init()) 1577 return -1; 1533 return -1; 1578 1534 1579 pids = zalloc(g->p.nr_proc * sizeof(* 1535 pids = zalloc(g->p.nr_proc * sizeof(*pids)); 1580 pid = -1; 1536 pid = -1; 1581 1537 1582 if (g->p.serialize_startup) { 1538 if (g->p.serialize_startup) { 1583 tprintf(" #\n"); 1539 tprintf(" #\n"); 1584 tprintf(" # Startup synchroni 1540 tprintf(" # Startup synchronization: ..."); fflush(stdout); 1585 } 1541 } 1586 1542 1587 gettimeofday(&start, NULL); 1543 gettimeofday(&start, NULL); 1588 1544 1589 for (i = 0; i < g->p.nr_proc; i++) { 1545 for (i = 0; i < g->p.nr_proc; i++) { 1590 pid = fork(); 1546 pid = fork(); 1591 dprintf(" # process %2d: PID 1547 dprintf(" # process %2d: PID %d\n", i, pid); 1592 1548 1593 BUG_ON(pid < 0); 1549 BUG_ON(pid < 0); 1594 if (!pid) { 1550 if (!pid) { 1595 /* Child process: */ 1551 /* Child process: */ 1596 worker_process(i); 1552 worker_process(i); 1597 1553 1598 exit(0); 1554 exit(0); 1599 } 1555 } 1600 pids[i] = pid; 1556 pids[i] = pid; 1601 1557 1602 } 1558 } 1603 1559 1604 if (g->p.serialize_startup) { 1560 if (g->p.serialize_startup) { 1605 bool threads_ready = false; 1561 bool threads_ready = false; 1606 double startup_sec; 1562 double startup_sec; 1607 1563 1608 /* 1564 /* 1609 * Wait for all the threads t 1565 * 
Wait for all the threads to start up. The last thread will 1610 * signal this process. 1566 * signal this process. 1611 */ 1567 */ 1612 mutex_lock(&g->startup_mutex) !! 1568 pthread_mutex_lock(&g->startup_mutex); 1613 while (g->nr_tasks_started != 1569 while (g->nr_tasks_started != g->p.nr_tasks) 1614 cond_wait(&g->startup !! 1570 pthread_cond_wait(&g->startup_cond, &g->startup_mutex); 1615 1571 1616 mutex_unlock(&g->startup_mute !! 1572 pthread_mutex_unlock(&g->startup_mutex); 1617 1573 1618 /* Wait for all threads to be 1574 /* Wait for all threads to be at the start_work_cond. */ 1619 while (!threads_ready) { 1575 while (!threads_ready) { 1620 mutex_lock(&g->start_ !! 1576 pthread_mutex_lock(&g->start_work_mutex); 1621 threads_ready = (g->n 1577 threads_ready = (g->nr_tasks_working == g->p.nr_tasks); 1622 mutex_unlock(&g->star !! 1578 pthread_mutex_unlock(&g->start_work_mutex); 1623 if (!threads_ready) 1579 if (!threads_ready) 1624 usleep(1); 1580 usleep(1); 1625 } 1581 } 1626 1582 1627 gettimeofday(&stop, NULL); 1583 gettimeofday(&stop, NULL); 1628 1584 1629 timersub(&stop, &start, &diff 1585 timersub(&stop, &start, &diff); 1630 1586 1631 startup_sec = diff.tv_sec * N 1587 startup_sec = diff.tv_sec * NSEC_PER_SEC; 1632 startup_sec += diff.tv_usec * 1588 startup_sec += diff.tv_usec * NSEC_PER_USEC; 1633 startup_sec /= NSEC_PER_SEC; 1589 startup_sec /= NSEC_PER_SEC; 1634 1590 1635 tprintf(" threads initialized 1591 tprintf(" threads initialized in %.6f seconds.\n", startup_sec); 1636 tprintf(" #\n"); 1592 tprintf(" #\n"); 1637 1593 1638 start = stop; 1594 start = stop; 1639 /* Start all threads running. 1595 /* Start all threads running. */ 1640 mutex_lock(&g->start_work_mut !! 1596 pthread_mutex_lock(&g->start_work_mutex); 1641 g->start_work = true; 1597 g->start_work = true; 1642 mutex_unlock(&g->start_work_m !! 1598 pthread_mutex_unlock(&g->start_work_mutex); 1643 cond_broadcast(&g->start_work !! 
1599 pthread_cond_broadcast(&g->start_work_cond); 1644 } else { 1600 } else { 1645 gettimeofday(&start, NULL); 1601 gettimeofday(&start, NULL); 1646 } 1602 } 1647 1603 1648 /* Parent process: */ 1604 /* Parent process: */ 1649 1605 1650 1606 1651 for (i = 0; i < g->p.nr_proc; i++) { 1607 for (i = 0; i < g->p.nr_proc; i++) { 1652 wpid = waitpid(pids[i], &wait 1608 wpid = waitpid(pids[i], &wait_stat, 0); 1653 BUG_ON(wpid < 0); 1609 BUG_ON(wpid < 0); 1654 BUG_ON(!WIFEXITED(wait_stat)) 1610 BUG_ON(!WIFEXITED(wait_stat)); 1655 1611 1656 } 1612 } 1657 1613 1658 runtime_ns_sum = 0; 1614 runtime_ns_sum = 0; 1659 runtime_ns_min = -1LL; 1615 runtime_ns_min = -1LL; 1660 1616 1661 for (t = 0; t < g->p.nr_tasks; t++) { 1617 for (t = 0; t < g->p.nr_tasks; t++) { 1662 u64 thread_runtime_ns = g->th 1618 u64 thread_runtime_ns = g->threads[t].runtime_ns; 1663 1619 1664 runtime_ns_sum += thread_runt 1620 runtime_ns_sum += thread_runtime_ns; 1665 runtime_ns_min = min(thread_r 1621 runtime_ns_min = min(thread_runtime_ns, runtime_ns_min); 1666 } 1622 } 1667 1623 1668 gettimeofday(&stop, NULL); 1624 gettimeofday(&stop, NULL); 1669 timersub(&stop, &start, &diff); 1625 timersub(&stop, &start, &diff); 1670 1626 1671 BUG_ON(bench_format != BENCH_FORMAT_D 1627 BUG_ON(bench_format != BENCH_FORMAT_DEFAULT); 1672 1628 1673 tprintf("\n ###\n"); 1629 tprintf("\n ###\n"); 1674 tprintf("\n"); 1630 tprintf("\n"); 1675 1631 1676 runtime_sec_max = diff.tv_sec * NSEC_ 1632 runtime_sec_max = diff.tv_sec * NSEC_PER_SEC; 1677 runtime_sec_max += diff.tv_usec * NSE 1633 runtime_sec_max += diff.tv_usec * NSEC_PER_USEC; 1678 runtime_sec_max /= NSEC_PER_SEC; 1634 runtime_sec_max /= NSEC_PER_SEC; 1679 1635 1680 runtime_sec_min = runtime_ns_min / NS 1636 runtime_sec_min = runtime_ns_min / NSEC_PER_SEC; 1681 1637 1682 bytes = g->bytes_done; 1638 bytes = g->bytes_done; 1683 runtime_avg = (double)runtime_ns_sum 1639 runtime_avg = (double)runtime_ns_sum / g->p.nr_tasks / NSEC_PER_SEC; 1684 1640 1685 if (g->p.measure_convergence) { 1641 if (g->p.measure_convergence) { 1686 print_res(name, runtime_sec_m 1642 print_res(name, runtime_sec_max, 1687 "secs,", "NUMA-conver 1643 "secs,", "NUMA-convergence-latency", "secs latency to NUMA-converge"); 1688 } 1644 } 1689 1645 1690 print_res(name, runtime_sec_max, 1646 print_res(name, runtime_sec_max, 1691 "secs,", "runtime-max/thread" 1647 "secs,", "runtime-max/thread", "secs slowest (max) thread-runtime"); 1692 1648 1693 print_res(name, runtime_sec_min, 1649 print_res(name, runtime_sec_min, 1694 "secs,", "runtime-min/thread" 1650 "secs,", "runtime-min/thread", "secs fastest (min) thread-runtime"); 1695 1651 1696 print_res(name, runtime_avg, 1652 print_res(name, runtime_avg, 1697 "secs,", "runtime-avg/thread" 1653 "secs,", "runtime-avg/thread", "secs average thread-runtime"); 1698 1654 1699 delta_runtime = (runtime_sec_max - ru 1655 delta_runtime = (runtime_sec_max - runtime_sec_min)/2.0; 1700 print_res(name, delta_runtime / runti 1656 print_res(name, delta_runtime / runtime_sec_max * 100.0, 1701 "%,", "spread-runtime/thread" 1657 "%,", "spread-runtime/thread", "% difference between max/avg runtime"); 1702 1658 1703 print_res(name, bytes / g->p.nr_tasks 1659 print_res(name, bytes / g->p.nr_tasks / 1e9, 1704 "GB,", "data/thread", 1660 "GB,", "data/thread", "GB data processed, per thread"); 1705 1661 1706 print_res(name, bytes / 1e9, 1662 print_res(name, bytes / 1e9, 1707 "GB,", "data-total", 1663 "GB,", "data-total", "GB data processed, total"); 1708 1664 1709 print_res(name, runtime_sec_max * NSE 1665 
print_res(name, runtime_sec_max * NSEC_PER_SEC / (bytes / g->p.nr_tasks), 1710 "nsecs,", "runtime/byte/threa 1666 "nsecs,", "runtime/byte/thread","nsecs/byte/thread runtime"); 1711 1667 1712 print_res(name, bytes / g->p.nr_tasks 1668 print_res(name, bytes / g->p.nr_tasks / 1e9 / runtime_sec_max, 1713 "GB/sec,", "thread-speed", 1669 "GB/sec,", "thread-speed", "GB/sec/thread speed"); 1714 1670 1715 print_res(name, bytes / runtime_sec_m 1671 print_res(name, bytes / runtime_sec_max / 1e9, 1716 "GB/sec,", "total-speed", 1672 "GB/sec,", "total-speed", "GB/sec total speed"); 1717 1673 1718 if (g->p.show_details >= 2) { 1674 if (g->p.show_details >= 2) { 1719 char tname[14 + 2 * 11 + 1]; !! 1675 char tname[14 + 2 * 10 + 1]; 1720 struct thread_data *td; 1676 struct thread_data *td; 1721 for (p = 0; p < g->p.nr_proc; 1677 for (p = 0; p < g->p.nr_proc; p++) { 1722 for (t = 0; t < g->p. 1678 for (t = 0; t < g->p.nr_threads; t++) { 1723 memset(tname, 1679 memset(tname, 0, sizeof(tname)); 1724 td = g->threa 1680 td = g->threads + p*g->p.nr_threads + t; 1725 snprintf(tnam 1681 snprintf(tname, sizeof(tname), "process%d:thread%d", p, t); 1726 print_res(tna 1682 print_res(tname, td->speed_gbs, 1727 "GB/s 1683 "GB/sec", "thread-speed", "GB/sec/thread speed"); 1728 print_res(tna 1684 print_res(tname, td->system_time_ns / NSEC_PER_SEC, 1729 "secs 1685 "secs", "thread-system-time", "system CPU time/thread"); 1730 print_res(tna 1686 print_res(tname, td->user_time_ns / NSEC_PER_SEC, 1731 "secs 1687 "secs", "thread-user-time", "user CPU time/thread"); 1732 } 1688 } 1733 } 1689 } 1734 } 1690 } 1735 1691 1736 free(pids); 1692 free(pids); 1737 1693 1738 deinit(); 1694 deinit(); 1739 1695 1740 return 0; 1696 return 0; 1741 } 1697 } 1742 1698 1743 #define MAX_ARGS 50 1699 #define MAX_ARGS 50 1744 1700 1745 static int command_size(const char **argv) 1701 static int command_size(const char **argv) 1746 { 1702 { 1747 int size = 0; 1703 int size = 0; 1748 1704 1749 while (*argv) { 1705 while (*argv) { 1750 size++; 1706 size++; 1751 argv++; 1707 argv++; 1752 } 1708 } 1753 1709 1754 BUG_ON(size >= MAX_ARGS); 1710 BUG_ON(size >= MAX_ARGS); 1755 1711 1756 return size; 1712 return size; 1757 } 1713 } 1758 1714 1759 static void init_params(struct params *p, con 1715 static void init_params(struct params *p, const char *name, int argc, const char **argv) 1760 { 1716 { 1761 int i; 1717 int i; 1762 1718 1763 printf("\n # Running %s \"perf bench 1719 printf("\n # Running %s \"perf bench numa", name); 1764 1720 1765 for (i = 0; i < argc; i++) 1721 for (i = 0; i < argc; i++) 1766 printf(" %s", argv[i]); 1722 printf(" %s", argv[i]); 1767 1723 1768 printf("\"\n"); 1724 printf("\"\n"); 1769 1725 1770 memset(p, 0, sizeof(*p)); 1726 memset(p, 0, sizeof(*p)); 1771 1727 1772 /* Initialize nonzero defaults: */ 1728 /* Initialize nonzero defaults: */ 1773 1729 1774 p->serialize_startup = 1; 1730 p->serialize_startup = 1; 1775 p->data_reads = tru 1731 p->data_reads = true; 1776 p->data_writes = tru 1732 p->data_writes = true; 1777 p->data_backwards = tru 1733 p->data_backwards = true; 1778 p->data_rand_walk = tru 1734 p->data_rand_walk = true; 1779 p->nr_loops = -1; 1735 p->nr_loops = -1; 1780 p->init_random = tru 1736 p->init_random = true; 1781 p->mb_global_str = "1" 1737 p->mb_global_str = "1"; 1782 p->nr_proc = 1; 1738 p->nr_proc = 1; 1783 p->nr_threads = 1; 1739 p->nr_threads = 1; 1784 p->nr_secs = 5; 1740 p->nr_secs = 5; 1785 p->run_all = arg 1741 p->run_all = argc == 1; 1786 } 1742 } 1787 1743 1788 static int run_bench_numa(const char 
*name, c 1744 static int run_bench_numa(const char *name, const char **argv) 1789 { 1745 { 1790 int argc = command_size(argv); 1746 int argc = command_size(argv); 1791 1747 1792 init_params(&p0, name, argc, argv); 1748 init_params(&p0, name, argc, argv); 1793 argc = parse_options(argc, argv, opti 1749 argc = parse_options(argc, argv, options, bench_numa_usage, 0); 1794 if (argc) 1750 if (argc) 1795 goto err; 1751 goto err; 1796 1752 1797 if (__bench_numa(name)) 1753 if (__bench_numa(name)) 1798 goto err; 1754 goto err; 1799 1755 1800 return 0; 1756 return 0; 1801 1757 1802 err: 1758 err: 1803 return -1; 1759 return -1; 1804 } 1760 } 1805 1761 1806 #define OPT_BW_RAM "-s", "20", 1762 #define OPT_BW_RAM "-s", "20", "-zZq", "--thp", " 1", "--no-data_rand_walk" 1807 #define OPT_BW_RAM_NOTHP OPT_BW_RAM, 1763 #define OPT_BW_RAM_NOTHP OPT_BW_RAM, "--thp", "-1" 1808 1764 1809 #define OPT_CONV "-s", "100", 1765 #define OPT_CONV "-s", "100", "-zZ0qcm", "--thp", " 1" 1810 #define OPT_CONV_NOTHP OPT_CONV, 1766 #define OPT_CONV_NOTHP OPT_CONV, "--thp", "-1" 1811 1767 1812 #define OPT_BW "-s", "20", 1768 #define OPT_BW "-s", "20", "-zZ0q", "--thp", " 1" 1813 #define OPT_BW_NOTHP OPT_BW, 1769 #define OPT_BW_NOTHP OPT_BW, "--thp", "-1" 1814 1770 1815 /* 1771 /* 1816 * The built-in test-suite executed by "perf 1772 * The built-in test-suite executed by "perf bench numa -a". 1817 * 1773 * 1818 * (A minimum of 4 nodes and 16 GB of RAM is 1774 * (A minimum of 4 nodes and 16 GB of RAM is recommended.) 1819 */ 1775 */ 1820 static const char *tests[][MAX_ARGS] = { 1776 static const char *tests[][MAX_ARGS] = { 1821 /* Basic single-stream NUMA bandwidth meas 1777 /* Basic single-stream NUMA bandwidth measurements: */ 1822 { "RAM-bw-local,", "mem", "-p", "1", 1778 { "RAM-bw-local,", "mem", "-p", "1", "-t", "1", "-P", "1024", 1823 "-C" , "", "-M", 1779 "-C" , "", "-M", "", OPT_BW_RAM }, 1824 { "RAM-bw-local-NOTHP,", 1780 { "RAM-bw-local-NOTHP,", 1825 "mem", "-p", "1", 1781 "mem", "-p", "1", "-t", "1", "-P", "1024", 1826 "-C" , "", "-M", 1782 "-C" , "", "-M", "", OPT_BW_RAM_NOTHP }, 1827 { "RAM-bw-remote,", "mem", "-p", "1", 1783 { "RAM-bw-remote,", "mem", "-p", "1", "-t", "1", "-P", "1024", 1828 "-C" , "", "-M", 1784 "-C" , "", "-M", "1", OPT_BW_RAM }, 1829 1785 1830 /* 2-stream NUMA bandwidth measurements: * 1786 /* 2-stream NUMA bandwidth measurements: */ 1831 { "RAM-bw-local-2x,", "mem", "-p", "2", 1787 { "RAM-bw-local-2x,", "mem", "-p", "2", "-t", "1", "-P", "1024", 1832 "-C", "0,2", "-M", 1788 "-C", "0,2", "-M", "0x2", OPT_BW_RAM }, 1833 { "RAM-bw-remote-2x,", "mem", "-p", "2", 1789 { "RAM-bw-remote-2x,", "mem", "-p", "2", "-t", "1", "-P", "1024", 1834 "-C", "0,2", "-M", 1790 "-C", "0,2", "-M", "1x2", OPT_BW_RAM }, 1835 1791 1836 /* Cross-stream NUMA bandwidth measurement 1792 /* Cross-stream NUMA bandwidth measurement: */ 1837 { "RAM-bw-cross,", "mem", "-p", "2", 1793 { "RAM-bw-cross,", "mem", "-p", "2", "-t", "1", "-P", "1024", 1838 "-C", "0,8", "-M", 1794 "-C", "0,8", "-M", "1,0", OPT_BW_RAM }, 1839 1795 1840 /* Convergence latency measurements: */ 1796 /* Convergence latency measurements: */ 1841 { " 1x3-convergence,", "mem", "-p", "1", 1797 { " 1x3-convergence,", "mem", "-p", "1", "-t", "3", "-P", "512", OPT_CONV }, 1842 { " 1x4-convergence,", "mem", "-p", "1", 1798 { " 1x4-convergence,", "mem", "-p", "1", "-t", "4", "-P", "512", OPT_CONV }, 1843 { " 1x6-convergence,", "mem", "-p", "1", 1799 { " 1x6-convergence,", "mem", "-p", "1", "-t", "6", "-P", "1020", OPT_CONV }, 1844 { " 2x3-convergence,", "mem", 
"-p", "2", 1800 { " 2x3-convergence,", "mem", "-p", "2", "-t", "3", "-P", "1020", OPT_CONV }, 1845 { " 3x3-convergence,", "mem", "-p", "3", 1801 { " 3x3-convergence,", "mem", "-p", "3", "-t", "3", "-P", "1020", OPT_CONV }, 1846 { " 4x4-convergence,", "mem", "-p", "4", 1802 { " 4x4-convergence,", "mem", "-p", "4", "-t", "4", "-P", "512", OPT_CONV }, 1847 { " 4x4-convergence-NOTHP,", 1803 { " 4x4-convergence-NOTHP,", 1848 "mem", "-p", "4", 1804 "mem", "-p", "4", "-t", "4", "-P", "512", OPT_CONV_NOTHP }, 1849 { " 4x6-convergence,", "mem", "-p", "4", 1805 { " 4x6-convergence,", "mem", "-p", "4", "-t", "6", "-P", "1020", OPT_CONV }, 1850 { " 4x8-convergence,", "mem", "-p", "4", 1806 { " 4x8-convergence,", "mem", "-p", "4", "-t", "8", "-P", "512", OPT_CONV }, 1851 { " 8x4-convergence,", "mem", "-p", "8", 1807 { " 8x4-convergence,", "mem", "-p", "8", "-t", "4", "-P", "512", OPT_CONV }, 1852 { " 8x4-convergence-NOTHP,", 1808 { " 8x4-convergence-NOTHP,", 1853 "mem", "-p", "8", 1809 "mem", "-p", "8", "-t", "4", "-P", "512", OPT_CONV_NOTHP }, 1854 { " 3x1-convergence,", "mem", "-p", "3", 1810 { " 3x1-convergence,", "mem", "-p", "3", "-t", "1", "-P", "512", OPT_CONV }, 1855 { " 4x1-convergence,", "mem", "-p", "4", 1811 { " 4x1-convergence,", "mem", "-p", "4", "-t", "1", "-P", "512", OPT_CONV }, 1856 { " 8x1-convergence,", "mem", "-p", "8", 1812 { " 8x1-convergence,", "mem", "-p", "8", "-t", "1", "-P", "512", OPT_CONV }, 1857 { "16x1-convergence,", "mem", "-p", "16", 1813 { "16x1-convergence,", "mem", "-p", "16", "-t", "1", "-P", "256", OPT_CONV }, 1858 { "32x1-convergence,", "mem", "-p", "32", 1814 { "32x1-convergence,", "mem", "-p", "32", "-t", "1", "-P", "128", OPT_CONV }, 1859 1815 1860 /* Various NUMA process/thread layout band 1816 /* Various NUMA process/thread layout bandwidth measurements: */ 1861 { " 2x1-bw-process,", "mem", "-p", "2", 1817 { " 2x1-bw-process,", "mem", "-p", "2", "-t", "1", "-P", "1024", OPT_BW }, 1862 { " 3x1-bw-process,", "mem", "-p", "3", 1818 { " 3x1-bw-process,", "mem", "-p", "3", "-t", "1", "-P", "1024", OPT_BW }, 1863 { " 4x1-bw-process,", "mem", "-p", "4", 1819 { " 4x1-bw-process,", "mem", "-p", "4", "-t", "1", "-P", "1024", OPT_BW }, 1864 { " 8x1-bw-process,", "mem", "-p", "8", 1820 { " 8x1-bw-process,", "mem", "-p", "8", "-t", "1", "-P", " 512", OPT_BW }, 1865 { " 8x1-bw-process-NOTHP,", 1821 { " 8x1-bw-process-NOTHP,", 1866 "mem", "-p", "8", 1822 "mem", "-p", "8", "-t", "1", "-P", " 512", OPT_BW_NOTHP }, 1867 { "16x1-bw-process,", "mem", "-p", "16", 1823 { "16x1-bw-process,", "mem", "-p", "16", "-t", "1", "-P", "256", OPT_BW }, 1868 1824 1869 { " 1x4-bw-thread,", "mem", "-p", "1", 1825 { " 1x4-bw-thread,", "mem", "-p", "1", "-t", "4", "-T", "256", OPT_BW }, 1870 { " 1x8-bw-thread,", "mem", "-p", "1", 1826 { " 1x8-bw-thread,", "mem", "-p", "1", "-t", "8", "-T", "256", OPT_BW }, 1871 { "1x16-bw-thread,", "mem", "-p", "1", 1827 { "1x16-bw-thread,", "mem", "-p", "1", "-t", "16", "-T", "128", OPT_BW }, 1872 { "1x32-bw-thread,", "mem", "-p", "1", 1828 { "1x32-bw-thread,", "mem", "-p", "1", "-t", "32", "-T", "64", OPT_BW }, 1873 1829 1874 { " 2x3-bw-process,", "mem", "-p", "2", 1830 { " 2x3-bw-process,", "mem", "-p", "2", "-t", "3", "-P", "512", OPT_BW }, 1875 { " 4x4-bw-process,", "mem", "-p", "4", 1831 { " 4x4-bw-process,", "mem", "-p", "4", "-t", "4", "-P", "512", OPT_BW }, 1876 { " 4x6-bw-process,", "mem", "-p", "4", 1832 { " 4x6-bw-process,", "mem", "-p", "4", "-t", "6", "-P", "512", OPT_BW }, 1877 { " 4x8-bw-process,", "mem", "-p", "4", 1833 { " 4x8-bw-process,", 
"mem", "-p", "4", "-t", "8", "-P", "512", OPT_BW }, 1878 { " 4x8-bw-process-NOTHP,", 1834 { " 4x8-bw-process-NOTHP,", 1879 "mem", "-p", "4", 1835 "mem", "-p", "4", "-t", "8", "-P", "512", OPT_BW_NOTHP }, 1880 { " 3x3-bw-process,", "mem", "-p", "3", 1836 { " 3x3-bw-process,", "mem", "-p", "3", "-t", "3", "-P", "512", OPT_BW }, 1881 { " 5x5-bw-process,", "mem", "-p", "5", 1837 { " 5x5-bw-process,", "mem", "-p", "5", "-t", "5", "-P", "512", OPT_BW }, 1882 1838 1883 { "2x16-bw-process,", "mem", "-p", "2", 1839 { "2x16-bw-process,", "mem", "-p", "2", "-t", "16", "-P", "512", OPT_BW }, 1884 { "1x32-bw-process,", "mem", "-p", "1", 1840 { "1x32-bw-process,", "mem", "-p", "1", "-t", "32", "-P", "2048", OPT_BW }, 1885 1841 1886 { "numa02-bw,", "mem", "-p", "1", 1842 { "numa02-bw,", "mem", "-p", "1", "-t", "32", "-T", "32", OPT_BW }, 1887 { "numa02-bw-NOTHP,", "mem", "-p", "1", 1843 { "numa02-bw-NOTHP,", "mem", "-p", "1", "-t", "32", "-T", "32", OPT_BW_NOTHP }, 1888 { "numa01-bw-thread,", "mem", "-p", "2", 1844 { "numa01-bw-thread,", "mem", "-p", "2", "-t", "16", "-T", "192", OPT_BW }, 1889 { "numa01-bw-thread-NOTHP,", 1845 { "numa01-bw-thread-NOTHP,", 1890 "mem", "-p", "2", 1846 "mem", "-p", "2", "-t", "16", "-T", "192", OPT_BW_NOTHP }, 1891 }; 1847 }; 1892 1848 1893 static int bench_all(void) 1849 static int bench_all(void) 1894 { 1850 { 1895 int nr = ARRAY_SIZE(tests); 1851 int nr = ARRAY_SIZE(tests); 1896 int ret; 1852 int ret; 1897 int i; 1853 int i; 1898 1854 1899 ret = system("echo ' #'; echo ' # Run 1855 ret = system("echo ' #'; echo ' # Running test on: '$(uname -a); echo ' #'"); 1900 BUG_ON(ret < 0); 1856 BUG_ON(ret < 0); 1901 1857 1902 for (i = 0; i < nr; i++) { 1858 for (i = 0; i < nr; i++) { 1903 run_bench_numa(tests[i][0], t 1859 run_bench_numa(tests[i][0], tests[i] + 1); 1904 } 1860 } 1905 1861 1906 printf("\n"); 1862 printf("\n"); 1907 1863 1908 return 0; 1864 return 0; 1909 } 1865 } 1910 1866 1911 int bench_numa(int argc, const char **argv) 1867 int bench_numa(int argc, const char **argv) 1912 { 1868 { 1913 init_params(&p0, "main,", argc, argv) 1869 init_params(&p0, "main,", argc, argv); 1914 argc = parse_options(argc, argv, opti 1870 argc = parse_options(argc, argv, options, bench_numa_usage, 0); 1915 if (argc) 1871 if (argc) 1916 goto err; 1872 goto err; 1917 1873 1918 if (p0.run_all) 1874 if (p0.run_all) 1919 return bench_all(); 1875 return bench_all(); 1920 1876 1921 if (__bench_numa(NULL)) 1877 if (__bench_numa(NULL)) 1922 goto err; 1878 goto err; 1923 1879 1924 return 0; 1880 return 0; 1925 1881 1926 err: 1882 err: 1927 usage_with_options(numa_usage, option 1883 usage_with_options(numa_usage, options); 1928 return -1; 1884 return -1; 1929 } 1885 } 1930 1886