 // SPDX-License-Identifier: GPL-2.0
 /*
  * builtin-kwork.c
  *
  * Copyright (c) 2022 Huawei Inc, Yang Jihong <yangjihong1@huawei.com>
  */

 #include "builtin.h"
-#include "perf.h"

 #include "util/data.h"
 #include "util/evlist.h"
 #include "util/evsel.h"
 #include "util/header.h"
 #include "util/kwork.h"
 #include "util/debug.h"
 #include "util/session.h"
 #include "util/symbol.h"
 #include "util/thread.h"
 #include "util/string2.h"
 #include "util/callchain.h"
 #include "util/evsel_fprintf.h"
+#include "util/util.h"

 #include <subcmd/pager.h>
 #include <subcmd/parse-options.h>
 #include <traceevent/event-parse.h>

 #include <errno.h>
 #include <inttypes.h>
 #include <signal.h>
 #include <linux/err.h>
 #include <linux/time64.h>
 #include <linux/zalloc.h>

 /*
  * report header elements width
  */
 #define PRINT_CPU_WIDTH 4
 #define PRINT_COUNT_WIDTH 9
 #define PRINT_RUNTIME_WIDTH 10
 #define PRINT_LATENCY_WIDTH 10
 #define PRINT_TIMESTAMP_WIDTH 17
 #define PRINT_KWORK_NAME_WIDTH 30
 #define RPINT_DECIMAL_WIDTH 3
 #define PRINT_BRACKETPAIR_WIDTH 2
 #define PRINT_TIME_UNIT_SEC_WIDTH 2
 #define PRINT_TIME_UNIT_MESC_WIDTH 3
+#define PRINT_PID_WIDTH 7
+#define PRINT_TASK_NAME_WIDTH 16
+#define PRINT_CPU_USAGE_WIDTH 6
+#define PRINT_CPU_USAGE_DECIMAL_WIDTH 2
+#define PRINT_CPU_USAGE_HIST_WIDTH 30
 #define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
 #define PRINT_LATENCY_HEADER_WIDTH (PRINT_LATENCY_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
 #define PRINT_TIMEHIST_CPU_WIDTH (PRINT_CPU_WIDTH + PRINT_BRACKETPAIR_WIDTH)
 #define PRINT_TIMESTAMP_HEADER_WIDTH (PRINT_TIMESTAMP_WIDTH + PRINT_TIME_UNIT_SEC_WIDTH)

 struct sort_dimension {
 	const char *name;
 	int (*cmp)(struct kwork_work *l, struct kwork_work *r);
 	struct list_head list;
 };

 static int id_cmp(struct kwork_work *l, struct kwork_work *r)
 {
 	if (l->cpu > r->cpu)
 		return 1;
 	if (l->cpu < r->cpu)
 		return -1;

 	if (l->id > r->id)
 		return 1;
 	if (l->id < r->id)
 		return -1;

 	return 0;
 }

 static int count_cmp(struct kwork_work *l, struct kwork_work *r)
 {
 	if (l->nr_atoms > r->nr_atoms)
 		return 1;
 	if (l->nr_atoms < r->nr_atoms)
 		return -1;

 	return 0;
 }

 static int runtime_cmp(struct kwork_work *l, struct kwork_work *r)
 {
 	if (l->total_runtime > r->total_runtime)
 		return 1;
 	if (l->total_runtime < r->total_runtime)
 		return -1;

 	return 0;
 }

 static int max_runtime_cmp(struct kwork_work *l, struct kwork_work *r)
 {
 	if (l->max_runtime > r->max_runtime)
 		return 1;
 	if (l->max_runtime < r->max_runtime)
 		return -1;

 	return 0;
 }

 static int avg_latency_cmp(struct kwork_work *l, struct kwork_work *r)
 {
 	u64 avgl, avgr;

 	if (!r->nr_atoms)
 		return 1;
 	if (!l->nr_atoms)
 		return -1;

 	avgl = l->total_latency / l->nr_atoms;
 	avgr = r->total_latency / r->nr_atoms;

 	if (avgl > avgr)
 		return 1;
 	if (avgl < avgr)
 		return -1;

 	return 0;
 }

 static int max_latency_cmp(struct kwork_work *l, struct kwork_work *r)
 {
 	if (l->max_latency > r->max_latency)
 		return 1;
 	if (l->max_latency < r->max_latency)
 		return -1;

 	return 0;
 }

+static int cpu_usage_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+	if (l->cpu_usage > r->cpu_usage)
+		return 1;
+	if (l->cpu_usage < r->cpu_usage)
+		return -1;
+
+	return 0;
+}
+
+static int id_or_cpu_r_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+	if (l->id < r->id)
+		return 1;
+	if (l->id > r->id)
+		return -1;
+
+	if (l->id != 0)
+		return 0;
+
+	if (l->cpu < r->cpu)
+		return 1;
+	if (l->cpu > r->cpu)
+		return -1;
+
+	return 0;
+}
+
 static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
 			       const char *tok, struct list_head *list)
 {
 	size_t i;
 	static struct sort_dimension max_sort_dimension = {
 		.name = "max",
 		.cmp = max_runtime_cmp,
 	};
 	static struct sort_dimension id_sort_dimension = {
 		.name = "id",
 		.cmp = id_cmp,
 	};
 	static struct sort_dimension runtime_sort_dimension = {
 		.name = "runtime",
 		.cmp = runtime_cmp,
 	};
 	static struct sort_dimension count_sort_dimension = {
 		.name = "count",
 		.cmp = count_cmp,
 	};
 	static struct sort_dimension avg_sort_dimension = {
 		.name = "avg",
 		.cmp = avg_latency_cmp,
 	};
+	static struct sort_dimension rate_sort_dimension = {
+		.name = "rate",
+		.cmp = cpu_usage_cmp,
+	};
+	static struct sort_dimension tid_sort_dimension = {
+		.name = "tid",
+		.cmp = id_or_cpu_r_cmp,
+	};
 	struct sort_dimension *available_sorts[] = {
 		&id_sort_dimension,
 		&max_sort_dimension,
 		&count_sort_dimension,
 		&runtime_sort_dimension,
 		&avg_sort_dimension,
+		&rate_sort_dimension,
+		&tid_sort_dimension,
 	};

 	if (kwork->report == KWORK_REPORT_LATENCY)
 		max_sort_dimension.cmp = max_latency_cmp;

 	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
 		if (!strcmp(available_sorts[i]->name, tok)) {
 			list_add_tail(&available_sorts[i]->list, list);
 			return 0;
 		}
 	}

 	return -1;
 }

 static void setup_sorting(struct perf_kwork *kwork,
 			  const struct option *options,
 			  const char * const usage_msg[])
 {
 	char *tmp, *tok, *str = strdup(kwork->sort_order);

 	for (tok = strtok_r(str, ", ", &tmp);
 	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
 		if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0)
 			usage_with_options_msg(usage_msg, options,
 					       "Unknown --sort key: `%s'", tok);
 	}

 	pr_debug("Sort order: %s\n", kwork->sort_order);
 	free(str);
 }

 static struct kwork_atom *atom_new(struct perf_kwork *kwork,
 				   struct perf_sample *sample)
 {
 	unsigned long i;
 	struct kwork_atom_page *page;
 	struct kwork_atom *atom = NULL;

 	list_for_each_entry(page, &kwork->atom_page_list, list) {
 		if (!bitmap_full(page->bitmap, NR_ATOM_PER_PAGE)) {
 			i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);
 			BUG_ON(i >= NR_ATOM_PER_PAGE);
 			atom = &page->atoms[i];
 			goto found_atom;
 		}
 	}

 	/*
 	 * new page
 	 */
 	page = zalloc(sizeof(*page));
 	if (page == NULL) {
 		pr_err("Failed to zalloc kwork atom page\n");
 		return NULL;
 	}

 	i = 0;
 	atom = &page->atoms[0];
 	list_add_tail(&page->list, &kwork->atom_page_list);

 found_atom:
 	__set_bit(i, page->bitmap);
 	atom->time = sample->time;
 	atom->prev = NULL;
 	atom->page_addr = page;
 	atom->bit_inpage = i;
 	return atom;
 }

 static void atom_free(struct kwork_atom *atom)
 {
 	if (atom->prev != NULL)
 		atom_free(atom->prev);

 	__clear_bit(atom->bit_inpage,
 		    ((struct kwork_atom_page *)atom->page_addr)->bitmap);
 }

 static void atom_del(struct kwork_atom *atom)
 {
 	list_del(&atom->list);
 	atom_free(atom);
 }

 static int work_cmp(struct list_head *list,
 		    struct kwork_work *l, struct kwork_work *r)
 {
 	int ret = 0;
 	struct sort_dimension *sort;

 	BUG_ON(list_empty(list));

 	list_for_each_entry(sort, list, list) {
 		ret = sort->cmp(l, r);
 		if (ret)
 			return ret;
 	}

 	return ret;
 }

 static struct kwork_work *work_search(struct rb_root_cached *root,
 				      struct kwork_work *key,
 				      struct list_head *sort_list)
 {
 	int cmp;
 	struct kwork_work *work;
 	struct rb_node *node = root->rb_root.rb_node;

 	while (node) {
 		work = container_of(node, struct kwork_work, node);
 		cmp = work_cmp(sort_list, key, work);
 		if (cmp > 0)
 			node = node->rb_left;
 		else if (cmp < 0)
 			node = node->rb_right;
 		else {
 			if (work->name == NULL)
 				work->name = key->name;
 			return work;
 		}
 	}
 	return NULL;
 }

 static void work_insert(struct rb_root_cached *root,
 			struct kwork_work *key, struct list_head *sort_list)
 {
 	int cmp;
 	bool leftmost = true;
 	struct kwork_work *cur;
 	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;

 	while (*new) {
 		cur = container_of(*new, struct kwork_work, node);
 		parent = *new;
 		cmp = work_cmp(sort_list, key, cur);

 		if (cmp > 0)
 			new = &((*new)->rb_left);
 		else {
 			new = &((*new)->rb_right);
 			leftmost = false;
 		}
 	}

 	rb_link_node(&key->node, parent, new);
 	rb_insert_color_cached(&key->node, root, leftmost);
 }

 static struct kwork_work *work_new(struct kwork_work *key)
 {
 	int i;
 	struct kwork_work *work = zalloc(sizeof(*work));

 	if (work == NULL) {
 		pr_err("Failed to zalloc kwork work\n");
 		return NULL;
 	}

 	for (i = 0; i < KWORK_TRACE_MAX; i++)
 		INIT_LIST_HEAD(&work->atom_list[i]);

 	work->id = key->id;
 	work->cpu = key->cpu;
 	work->name = key->name;
 	work->class = key->class;
 	return work;
 }

 static struct kwork_work *work_findnew(struct rb_root_cached *root,
 				       struct kwork_work *key,
 				       struct list_head *sort_list)
 {
 	struct kwork_work *work = work_search(root, key, sort_list);

 	if (work != NULL)
 		return work;

 	work = work_new(key);
 	if (work)
 		work_insert(root, work, sort_list);

 	return work;
 }

 static void profile_update_timespan(struct perf_kwork *kwork,
 				    struct perf_sample *sample)
 {
 	if (!kwork->summary)
 		return;

 	if ((kwork->timestart == 0) || (kwork->timestart > sample->time))
 		kwork->timestart = sample->time;

 	if (kwork->timeend < sample->time)
 		kwork->timeend = sample->time;
 }

+static bool profile_name_match(struct perf_kwork *kwork,
+			       struct kwork_work *work)
+{
+	if (kwork->profile_name && work->name &&
+	    (strcmp(work->name, kwork->profile_name) != 0)) {
+		return false;
+	}
+
+	return true;
+}
+
 static bool profile_event_match(struct perf_kwork *kwork,
 				struct kwork_work *work,
 				struct perf_sample *sample)
 {
 	int cpu = work->cpu;
 	u64 time = sample->time;
 	struct perf_time_interval *ptime = &kwork->ptime;

 	if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
 		return false;

 	if (((ptime->start != 0) && (ptime->start > time)) ||
 	    ((ptime->end != 0) && (ptime->end < time)))
 		return false;

-	if ((kwork->profile_name != NULL) &&
-	    (work->name != NULL) &&
-	    (strcmp(work->name, kwork->profile_name) != 0))
+	/*
+	 * report top needs to collect the runtime of every work and
+	 * calculate the load of each core.
+	 */
+	if ((kwork->report != KWORK_REPORT_TOP) &&
+	    !profile_name_match(kwork, work)) {
 		return false;
+	}

 	profile_update_timespan(kwork, sample);
 	return true;
 }

 static int work_push_atom(struct perf_kwork *kwork,
 			  struct kwork_class *class,
 			  enum kwork_trace_type src_type,
 			  enum kwork_trace_type dst_type,
 			  struct evsel *evsel,
 			  struct perf_sample *sample,
 			  struct machine *machine,
-			  struct kwork_work **ret_work)
+			  struct kwork_work **ret_work,
+			  bool overwrite)
 {
-	struct kwork_atom *atom, *dst_atom;
+	struct kwork_atom *atom, *dst_atom, *last_atom;
 	struct kwork_work *work, key;

 	BUG_ON(class->work_init == NULL);
-	class->work_init(class, &key, evsel, sample, machine);
+	class->work_init(kwork, class, &key, src_type, evsel, sample, machine);

 	atom = atom_new(kwork, sample);
 	if (atom == NULL)
 		return -1;

 	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
 	if (work == NULL) {
-		free(atom);
+		atom_free(atom);
 		return -1;
 	}

-	if (!profile_event_match(kwork, work, sample))
+	if (!profile_event_match(kwork, work, sample)) {
+		atom_free(atom);
 		return 0;
+	}

 	if (dst_type < KWORK_TRACE_MAX) {
 		dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
 						   struct kwork_atom, list);
 		if (dst_atom != NULL) {
 			atom->prev = dst_atom;
 			list_del(&dst_atom->list);
 		}
 	}

 	if (ret_work != NULL)
 		*ret_work = work;

+	if (overwrite) {
+		last_atom = list_last_entry_or_null(&work->atom_list[src_type],
+						    struct kwork_atom, list);
+		if (last_atom) {
+			atom_del(last_atom);
+
+			kwork->nr_skipped_events[src_type]++;
+			kwork->nr_skipped_events[KWORK_TRACE_MAX]++;
+		}
+	}
+
 	list_add_tail(&atom->list, &work->atom_list[src_type]);

 	return 0;
 }

 static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
 					struct kwork_class *class,
 					enum kwork_trace_type src_type,
 					enum kwork_trace_type dst_type,
 					struct evsel *evsel,
 					struct perf_sample *sample,
 					struct machine *machine,
 					struct kwork_work **ret_work)
 {
 	struct kwork_atom *atom, *src_atom;
 	struct kwork_work *work, key;

 	BUG_ON(class->work_init == NULL);
-	class->work_init(class, &key, evsel, sample, machine);
+	class->work_init(kwork, class, &key, src_type, evsel, sample, machine);

 	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
 	if (ret_work != NULL)
 		*ret_work = work;

 	if (work == NULL)
 		return NULL;

 	if (!profile_event_match(kwork, work, sample))
 		return NULL;

 	atom = list_last_entry_or_null(&work->atom_list[dst_type],
 				       struct kwork_atom, list);
 	if (atom != NULL)
 		return atom;

 	src_atom = atom_new(kwork, sample);
 	if (src_atom != NULL)
 		list_add_tail(&src_atom->list, &work->atom_list[src_type]);
 	else {
 		if (ret_work != NULL)
 			*ret_work = NULL;
 	}

 	return NULL;
 }

+static struct kwork_work *find_work_by_id(struct rb_root_cached *root,
+					  u64 id, int cpu)
+{
+	struct rb_node *next;
+	struct kwork_work *work;
+
+	next = rb_first_cached(root);
+	while (next) {
+		work = rb_entry(next, struct kwork_work, node);
+		if ((cpu != -1 && work->id == id && work->cpu == cpu) ||
+		    (cpu == -1 && work->id == id))
+			return work;
+
+		next = rb_next(next);
+	}
+
+	return NULL;
+}
+
+static struct kwork_class *get_kwork_class(struct perf_kwork *kwork,
+					   enum kwork_class_type type)
+{
+	struct kwork_class *class;
+
+	list_for_each_entry(class, &kwork->class_list, list) {
+		if (class->type == type)
+			return class;
+	}
+
+	return NULL;
+}

 static void report_update_exit_event(struct kwork_work *work,
 				     struct kwork_atom *atom,
 				     struct perf_sample *sample)
 {
 	u64 delta;
 	u64 exit_time = sample->time;
 	u64 entry_time = atom->time;

 	if ((entry_time != 0) && (exit_time >= entry_time)) {
 		delta = exit_time - entry_time;
 		if ((delta > work->max_runtime) ||
 		    (work->max_runtime == 0)) {
 			work->max_runtime = delta;
 			work->max_runtime_start = entry_time;
 			work->max_runtime_end = exit_time;
 		}
 		work->total_runtime += delta;
 		work->nr_atoms++;
 	}
 }

 static int report_entry_event(struct perf_kwork *kwork,
 			      struct kwork_class *class,
 			      struct evsel *evsel,
 			      struct perf_sample *sample,
 			      struct machine *machine)
 {
 	return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
 			      KWORK_TRACE_MAX, evsel, sample,
-			      machine, NULL);
+			      machine, NULL, true);
 }

 static int report_exit_event(struct perf_kwork *kwork,
 			     struct kwork_class *class,
 			     struct evsel *evsel,
 			     struct perf_sample *sample,
 			     struct machine *machine)
 {
 	struct kwork_atom *atom = NULL;
 	struct kwork_work *work = NULL;

 	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
 			     KWORK_TRACE_ENTRY, evsel, sample,
 			     machine, &work);
 	if (work == NULL)
 		return -1;

 	if (atom != NULL) {
 		report_update_exit_event(work, atom, sample);
 		atom_del(atom);
 	}

 	return 0;
 }

 static void latency_update_entry_event(struct kwork_work *work,
 				       struct kwork_atom *atom,
 				       struct perf_sample *sample)
 {
 	u64 delta;
 	u64 entry_time = sample->time;
 	u64 raise_time = atom->time;

 	if ((raise_time != 0) && (entry_time >= raise_time)) {
 		delta = entry_time - raise_time;
 		if ((delta > work->max_latency) ||
 		    (work->max_latency == 0)) {
 			work->max_latency = delta;
 			work->max_latency_start = raise_time;
 			work->max_latency_end = entry_time;
 		}
 		work->total_latency += delta;
 		work->nr_atoms++;
 	}
 }

 static int latency_raise_event(struct perf_kwork *kwork,
 			       struct kwork_class *class,
 			       struct evsel *evsel,
 			       struct perf_sample *sample,
 			       struct machine *machine)
 {
 	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
 			      KWORK_TRACE_MAX, evsel, sample,
-			      machine, NULL);
+			      machine, NULL, true);
 }

 static int latency_entry_event(struct perf_kwork *kwork,
 			       struct kwork_class *class,
 			       struct evsel *evsel,
 			       struct perf_sample *sample,
 			       struct machine *machine)
 {
 	struct kwork_atom *atom = NULL;
 	struct kwork_work *work = NULL;

 	atom = work_pop_atom(kwork, class, KWORK_TRACE_ENTRY,
 			     KWORK_TRACE_RAISE, evsel, sample,
 			     machine, &work);
 	if (work == NULL)
 		return -1;

 	if (atom != NULL) {
 		latency_update_entry_event(work, atom, sample);
 		atom_del(atom);
 	}

 	return 0;
 }

 static void timehist_save_callchain(struct perf_kwork *kwork,
 				    struct perf_sample *sample,
 				    struct evsel *evsel,
 				    struct machine *machine)
 {
 	struct symbol *sym;
 	struct thread *thread;
 	struct callchain_cursor_node *node;
-	struct callchain_cursor *cursor = &callchain_cursor;
+	struct callchain_cursor *cursor;

 	if (!kwork->show_callchain || sample->callchain == NULL)
 		return;

 	/* want main thread for process - has maps */
 	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
 	if (thread == NULL) {
 		pr_debug("Failed to get thread for pid %d\n", sample->pid);
 		return;
 	}

+	cursor = get_tls_callchain_cursor();
+
 	if (thread__resolve_callchain(thread, cursor, evsel, sample,
 				      NULL, NULL, kwork->max_stack + 2) != 0) {
 		pr_debug("Failed to resolve callchain, skipping\n");
 		goto out_put;
 	}

 	callchain_cursor_commit(cursor);

 	while (true) {
 		node = callchain_cursor_current(cursor);
 		if (node == NULL)
 			break;

 		sym = node->ms.sym;
 		if (sym) {
 			if (!strcmp(sym->name, "__softirqentry_text_start") ||
 			    !strcmp(sym->name, "__do_softirq"))
 				sym->ignore = 1;
 		}

 		callchain_cursor_advance(cursor);
 	}

 out_put:
 	thread__put(thread);
 }

 static void timehist_print_event(struct perf_kwork *kwork,
 				 struct kwork_work *work,
 				 struct kwork_atom *atom,
 				 struct perf_sample *sample,
 				 struct addr_location *al)
 {
 	char entrytime[32], exittime[32];
 	char kwork_name[PRINT_KWORK_NAME_WIDTH];

 	/*
 	 * runtime start
 	 */
 	timestamp__scnprintf_usec(atom->time,
 				  entrytime, sizeof(entrytime));
 	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, entrytime);

 	/*
 	 * runtime end
 	 */
 	timestamp__scnprintf_usec(sample->time,
 				  exittime, sizeof(exittime));
 	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, exittime);

 	/*
 	 * cpu
 	 */
 	printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);

 	/*
 	 * kwork name
 	 */
 	if (work->class && work->class->work_name) {
 		work->class->work_name(work, kwork_name,
 				       PRINT_KWORK_NAME_WIDTH);
 		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, kwork_name);
 	} else
 		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, "");

 	/*
 	 *runtime
 	 */
 	printf(" %*.*f ",
", 779 PRINT_RUNTIME_WIDTH, RPINT_DECI 673 PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH, 780 (double)(sample->time - atom->t 674 (double)(sample->time - atom->time) / NSEC_PER_MSEC); 781 675 782 /* 676 /* 783 * delaytime 677 * delaytime 784 */ 678 */ 785 if (atom->prev != NULL) 679 if (atom->prev != NULL) 786 printf(" %*.*f ", PRINT_LATENC 680 printf(" %*.*f ", PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH, 787 (double)(atom->time - a 681 (double)(atom->time - atom->prev->time) / NSEC_PER_MSEC); 788 else 682 else 789 printf(" %*s ", PRINT_LATENCY_ 683 printf(" %*s ", PRINT_LATENCY_WIDTH, " "); 790 684 791 /* 685 /* 792 * callchain 686 * callchain 793 */ 687 */ 794 if (kwork->show_callchain) { 688 if (kwork->show_callchain) { 795 struct callchain_cursor *curso << 796 << 797 if (cursor == NULL) << 798 return; << 799 << 800 printf(" "); 689 printf(" "); 801 << 802 sample__fprintf_sym(sample, al 690 sample__fprintf_sym(sample, al, 0, 803 EVSEL__PRI 691 EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE | 804 EVSEL__PRI 692 EVSEL__PRINT_CALLCHAIN_ARROW | 805 EVSEL__PRI 693 EVSEL__PRINT_SKIP_IGNORED, 806 cursor, sy !! 694 &callchain_cursor, symbol_conf.bt_stop_list, 807 stdout); 695 stdout); 808 } 696 } 809 697 810 printf("\n"); 698 printf("\n"); 811 } 699 } 812 700 813 static int timehist_raise_event(struct perf_kw 701 static int timehist_raise_event(struct perf_kwork *kwork, 814 struct kwork_c 702 struct kwork_class *class, 815 struct evsel * 703 struct evsel *evsel, 816 struct perf_sa 704 struct perf_sample *sample, 817 struct machine 705 struct machine *machine) 818 { 706 { 819 return work_push_atom(kwork, class, KW 707 return work_push_atom(kwork, class, KWORK_TRACE_RAISE, 820 KWORK_TRACE_MAX, 708 KWORK_TRACE_MAX, evsel, sample, 821 machine, NULL, t !! 709 machine, NULL); 822 } 710 } 823 711 824 static int timehist_entry_event(struct perf_kw 712 static int timehist_entry_event(struct perf_kwork *kwork, 825 struct kwork_c 713 struct kwork_class *class, 826 struct evsel * 714 struct evsel *evsel, 827 struct perf_sa 715 struct perf_sample *sample, 828 struct machine 716 struct machine *machine) 829 { 717 { 830 int ret; 718 int ret; 831 struct kwork_work *work = NULL; 719 struct kwork_work *work = NULL; 832 720 833 ret = work_push_atom(kwork, class, KWO 721 ret = work_push_atom(kwork, class, KWORK_TRACE_ENTRY, 834 KWORK_TRACE_RAISE 722 KWORK_TRACE_RAISE, evsel, sample, 835 machine, &work, t !! 723 machine, &work); 836 if (ret) 724 if (ret) 837 return ret; 725 return ret; 838 726 839 if (work != NULL) 727 if (work != NULL) 840 timehist_save_callchain(kwork, 728 timehist_save_callchain(kwork, sample, evsel, machine); 841 729 842 return 0; 730 return 0; 843 } 731 } 844 732 845 static int timehist_exit_event(struct perf_kwo 733 static int timehist_exit_event(struct perf_kwork *kwork, 846 struct kwork_cl 734 struct kwork_class *class, 847 struct evsel *e 735 struct evsel *evsel, 848 struct perf_sam 736 struct perf_sample *sample, 849 struct machine 737 struct machine *machine) 850 { 738 { 851 struct kwork_atom *atom = NULL; 739 struct kwork_atom *atom = NULL; 852 struct kwork_work *work = NULL; 740 struct kwork_work *work = NULL; 853 struct addr_location al; 741 struct addr_location al; 854 int ret = 0; << 855 742 856 addr_location__init(&al); << 857 if (machine__resolve(machine, &al, sam 743 if (machine__resolve(machine, &al, sample) < 0) { 858 pr_debug("Problem processing e 744 pr_debug("Problem processing event, skipping it\n"); 859 ret = -1; !! 
-		return -1;
+		ret = -1;
+		goto out;
 	}

 	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
 			     KWORK_TRACE_ENTRY, evsel, sample,
 			     machine, &work);
-	if (work == NULL)
-		return -1;
+	if (work == NULL) {
+		ret = -1;
+		goto out;
+	}

 	if (atom != NULL) {
 		work->nr_atoms++;
 		timehist_print_event(kwork, work, atom, sample, &al);
 		atom_del(atom);
 	}

-	return 0;
+out:
+	addr_location__exit(&al);
+	return ret;
 }

+static void top_update_runtime(struct kwork_work *work,
+			       struct kwork_atom *atom,
+			       struct perf_sample *sample)
+{
+	u64 delta;
+	u64 exit_time = sample->time;
+	u64 entry_time = atom->time;
+
+	if ((entry_time != 0) && (exit_time >= entry_time)) {
+		delta = exit_time - entry_time;
+		work->total_runtime += delta;
+	}
+}
+
+static int top_entry_event(struct perf_kwork *kwork,
+			   struct kwork_class *class,
+			   struct evsel *evsel,
+			   struct perf_sample *sample,
+			   struct machine *machine)
+{
+	return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
+			      KWORK_TRACE_MAX, evsel, sample,
+			      machine, NULL, true);
+}
+
+static int top_exit_event(struct perf_kwork *kwork,
+			  struct kwork_class *class,
+			  struct evsel *evsel,
+			  struct perf_sample *sample,
+			  struct machine *machine)
+{
+	struct kwork_work *work, *sched_work;
+	struct kwork_class *sched_class;
+	struct kwork_atom *atom;
+
+	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
+			     KWORK_TRACE_ENTRY, evsel, sample,
+			     machine, &work);
+	if (!work)
+		return -1;
+
+	if (atom) {
+		sched_class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
+		if (sched_class) {
+			sched_work = find_work_by_id(&sched_class->work_root,
+						     work->id, work->cpu);
+			if (sched_work)
+				top_update_runtime(work, atom, sample);
+		}
+		atom_del(atom);
+	}
+
+	return 0;
+}
+
+static int top_sched_switch_event(struct perf_kwork *kwork,
+				  struct kwork_class *class,
+				  struct evsel *evsel,
+				  struct perf_sample *sample,
+				  struct machine *machine)
+{
+	struct kwork_atom *atom;
+	struct kwork_work *work;
+
+	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
+			     KWORK_TRACE_ENTRY, evsel, sample,
+			     machine, &work);
+	if (!work)
+		return -1;
+
+	if (atom) {
+		top_update_runtime(work, atom, sample);
+		atom_del(atom);
+	}
+
+	return top_entry_event(kwork, class, evsel, sample, machine);
+}
+
 static struct kwork_class kwork_irq;
 static int process_irq_handler_entry_event(struct perf_tool *tool,
 					   struct evsel *evsel,
 					   struct perf_sample *sample,
 					   struct machine *machine)
 {
 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

 	if (kwork->tp_handler->entry_event)
 		return kwork->tp_handler->entry_event(kwork, &kwork_irq,
 						      evsel, sample, machine);
 	return 0;
 }

 static int process_irq_handler_exit_event(struct perf_tool *tool,
 					  struct evsel *evsel,
 					  struct perf_sample *sample,
 					  struct machine *machine)
 {
 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

 	if (kwork->tp_handler->exit_event)
 		return kwork->tp_handler->exit_event(kwork, &kwork_irq,
 						     evsel, sample, machine);
 	return 0;
 }

 const struct evsel_str_handler irq_tp_handlers[] = {
 	{ "irq:irq_handler_entry", process_irq_handler_entry_event, },
 	{ "irq:irq_handler_exit",  process_irq_handler_exit_event,  },
 };

 static int irq_class_init(struct kwork_class *class,
 			  struct perf_session *session)
 {
 	if (perf_session__set_tracepoints_handlers(session, irq_tp_handlers)) {
 		pr_err("Failed to set irq tracepoints handlers\n");
 		return -1;
 	}

 	class->work_root = RB_ROOT_CACHED;
 	return 0;
 }

-static void irq_work_init(struct kwork_class *class,
+static void irq_work_init(struct perf_kwork *kwork,
+			  struct kwork_class *class,
 			  struct kwork_work *work,
+			  enum kwork_trace_type src_type __maybe_unused,
 			  struct evsel *evsel,
 			  struct perf_sample *sample,
 			  struct machine *machine __maybe_unused)
 {
 	work->class = class;
 	work->cpu = sample->cpu;
-	work->id = evsel__intval(evsel, sample, "irq");
-	work->name = evsel__strval(evsel, sample, "name");
+
+	if (kwork->report == KWORK_REPORT_TOP) {
+		work->id = evsel__intval_common(evsel, sample, "common_pid");
+		work->name = NULL;
+	} else {
+		work->id = evsel__intval(evsel, sample, "irq");
+		work->name = evsel__strval(evsel, sample, "name");
+	}
 }

 static void irq_work_name(struct kwork_work *work, char *buf, int len)
 {
 	snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
 }

 static struct kwork_class kwork_irq = {
 	.name           = "irq",
 	.type           = KWORK_CLASS_IRQ,
 	.nr_tracepoints = 2,
 	.tp_handlers    = irq_tp_handlers,
 	.class_init     = irq_class_init,
 	.work_init      = irq_work_init,
 	.work_name      = irq_work_name,
 };

 static struct kwork_class kwork_softirq;
 static int process_softirq_raise_event(struct perf_tool *tool,
 				       struct evsel *evsel,
 				       struct perf_sample *sample,
 				       struct machine *machine)
 {
 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

 	if (kwork->tp_handler->raise_event)
 		return kwork->tp_handler->raise_event(kwork, &kwork_softirq,
 						      evsel, sample, machine);

 	return 0;
 }

 static int process_softirq_entry_event(struct perf_tool *tool,
 				       struct evsel *evsel,
 				       struct perf_sample *sample,
 				       struct machine *machine)
 {
 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

 	if (kwork->tp_handler->entry_event)
 		return kwork->tp_handler->entry_event(kwork, &kwork_softirq,
 						      evsel, sample, machine);

 	return 0;
 }

 static int process_softirq_exit_event(struct perf_tool *tool,
 				      struct evsel *evsel,
 				      struct perf_sample *sample,
 				      struct machine *machine)
 {
 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

 	if (kwork->tp_handler->exit_event)
 		return kwork->tp_handler->exit_event(kwork, &kwork_softirq,
 						     evsel, sample, machine);

 	return 0;
 }

 const struct evsel_str_handler softirq_tp_handlers[] = {
 	{ "irq:softirq_raise", process_softirq_raise_event, },
 	{ "irq:softirq_entry", process_softirq_entry_event, },
 	{ "irq:softirq_exit",  process_softirq_exit_event,  },
 };

 static int softirq_class_init(struct kwork_class *class,
 			      struct perf_session *session)
 {
 	if (perf_session__set_tracepoints_handlers(session,
 						   softirq_tp_handlers)) {
 		pr_err("Failed to set softirq tracepoints handlers\n");
 		return -1;
 	}

 	class->work_root = RB_ROOT_CACHED;
 	return 0;
 }

 static char *evsel__softirq_name(struct evsel *evsel, u64 num)
 {
 	char *name = NULL;
 	bool found = false;
 	struct tep_print_flag_sym *sym = NULL;
 	struct tep_print_arg *args = evsel->tp_format->print_fmt.args;

 	if ((args == NULL) || (args->next == NULL))
 		return NULL;

 	/* skip softirq field: "REC->vec" */
 	for (sym = args->next->symbol.symbols; sym != NULL; sym = sym->next) {
 		if ((eval_flag(sym->value) == (unsigned long long)num) &&
 		    (strlen(sym->str) != 0)) {
 			found = true;
 			break;
 		}
 	}

 	if (!found)
 		return NULL;

 	name = strdup(sym->str);
 	if (name == NULL) {
 		pr_err("Failed to copy symbol name\n");
 		return NULL;
 	}
 	return name;
 }

-static void softirq_work_init(struct kwork_class *class,
+static void softirq_work_init(struct perf_kwork *kwork,
+			      struct kwork_class *class,
 			      struct kwork_work *work,
+			      enum kwork_trace_type src_type __maybe_unused,
 			      struct evsel *evsel,
 			      struct perf_sample *sample,
 			      struct machine *machine __maybe_unused)
 {
-	u64 num = evsel__intval(evsel, sample, "vec");
+	u64 num;

-	work->id = num;
 	work->class = class;
 	work->cpu = sample->cpu;
-	work->name = evsel__softirq_name(evsel, num);
+
+	if (kwork->report == KWORK_REPORT_TOP) {
+		work->id = evsel__intval_common(evsel, sample, "common_pid");
+		work->name = NULL;
+	} else {
+		num = evsel__intval(evsel, sample, "vec");
+		work->id = num;
+		work->name = evsel__softirq_name(evsel, num);
+	}
 }

 static void softirq_work_name(struct kwork_work *work, char *buf, int len)
 {
 	snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
 }

 static struct kwork_class kwork_softirq = {
 	.name           = "softirq",
 	.type           = KWORK_CLASS_SOFTIRQ,
 	.nr_tracepoints = 3,
 	.tp_handlers    = softirq_tp_handlers,
 	.class_init     = softirq_class_init,
 	.work_init      = softirq_work_init,
 	.work_name      = softirq_work_name,
 };

 static struct kwork_class kwork_workqueue;
 static int process_workqueue_activate_work_event(struct perf_tool *tool,
 						 struct evsel *evsel,
 						 struct perf_sample *sample,
 						 struct machine *machine)
 {
 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

 	if (kwork->tp_handler->raise_event)
 		return kwork->tp_handler->raise_event(kwork, &kwork_workqueue,
 						      evsel, sample, machine);

 	return 0;
 }

 static int process_workqueue_execute_start_event(struct perf_tool *tool,
 						 struct evsel *evsel,
 						 struct perf_sample *sample,
 						 struct machine *machine)
 {
 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

 	if (kwork->tp_handler->entry_event)
 		return kwork->tp_handler->entry_event(kwork, &kwork_workqueue,
 						      evsel, sample, machine);

 	return 0;
 }

 static int process_workqueue_execute_end_event(struct perf_tool *tool,
 					       struct evsel *evsel,
 					       struct perf_sample *sample,
 					       struct machine *machine)
 {
 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

 	if (kwork->tp_handler->exit_event)
 		return kwork->tp_handler->exit_event(kwork, &kwork_workqueue,
 						     evsel, sample, machine);

 	return 0;
 }

 const struct evsel_str_handler workqueue_tp_handlers[] = {
 	{ "workqueue:workqueue_activate_work", process_workqueue_activate_work_event, },
 	{ "workqueue:workqueue_execute_start", process_workqueue_execute_start_event, },
 	{ "workqueue:workqueue_execute_end", process_workqueue_execute_end_event, },
 };

 static int workqueue_class_init(struct kwork_class *class,
 				struct perf_session *session)
 {
 	if (perf_session__set_tracepoints_handlers(session,
 						   workqueue_tp_handlers)) {
 		pr_err("Failed to set workqueue tracepoints handlers\n");
 		return -1;
 	}

 	class->work_root = RB_ROOT_CACHED;
 	return 0;
 }

-static void workqueue_work_init(struct kwork_class *class,
+static void workqueue_work_init(struct perf_kwork *kwork __maybe_unused,
+				struct kwork_class *class,
 				struct kwork_work *work,
+				enum kwork_trace_type src_type __maybe_unused,
 				struct evsel *evsel,
 				struct perf_sample *sample,
 				struct machine *machine)
 {
 	char *modp = NULL;
 	unsigned long long function_addr = evsel__intval(evsel,
 							 sample, "function");

 	work->class = class;
 	work->cpu = sample->cpu;
 	work->id = evsel__intval(evsel, sample, "work");
 	work->name = function_addr == 0 ? NULL :
 		machine__resolve_kernel_addr(machine, &function_addr, &modp);
 }

 static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
 {
 	if (work->name != NULL)
 		snprintf(buf, len, "(w)%s", work->name);
 	else
 		snprintf(buf, len, "(w)0x%" PRIx64, work->id);
 }

 static struct kwork_class kwork_workqueue = {
 	.name           = "workqueue",
 	.type           = KWORK_CLASS_WORKQUEUE,
 	.nr_tracepoints = 3,
 	.tp_handlers    = workqueue_tp_handlers,
 	.class_init     = workqueue_class_init,
 	.work_init      = workqueue_work_init,
 	.work_name      = workqueue_work_name,
 };

+static struct kwork_class kwork_sched;
+static int process_sched_switch_event(struct perf_tool *tool,
+				      struct evsel *evsel,
+				      struct perf_sample *sample,
+				      struct machine *machine)
+{
+	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+	if (kwork->tp_handler->sched_switch_event)
+		return kwork->tp_handler->sched_switch_event(kwork, &kwork_sched,
+							     evsel, sample, machine);
+	return 0;
+}
+
+const struct evsel_str_handler sched_tp_handlers[] = {
+	{ "sched:sched_switch", process_sched_switch_event, },
+};
+
+static int sched_class_init(struct kwork_class *class,
+			    struct perf_session *session)
+{
+	if (perf_session__set_tracepoints_handlers(session,
+						   sched_tp_handlers)) {
+		pr_err("Failed to set sched tracepoints handlers\n");
+		return -1;
+	}
+
+	class->work_root = RB_ROOT_CACHED;
+	return 0;
+}
+
+static void sched_work_init(struct perf_kwork *kwork __maybe_unused,
+			    struct kwork_class *class,
+			    struct kwork_work *work,
+			    enum kwork_trace_type src_type,
+			    struct evsel *evsel,
+			    struct perf_sample *sample,
+			    struct machine *machine __maybe_unused)
+{
+	work->class = class;
+	work->cpu = sample->cpu;
+
+	if (src_type == KWORK_TRACE_EXIT) {
+		work->id = evsel__intval(evsel, sample, "prev_pid");
+		work->name = strdup(evsel__strval(evsel, sample, "prev_comm"));
+	} else if (src_type == KWORK_TRACE_ENTRY) {
+		work->id = evsel__intval(evsel, sample, "next_pid");
+		work->name = strdup(evsel__strval(evsel, sample, "next_comm"));
+	}
+}
+
+static void sched_work_name(struct kwork_work *work, char *buf, int len)
+{
+	snprintf(buf, len, "%s", work->name);
+}
+
+static struct kwork_class kwork_sched = {
+	.name           = "sched",
+	.type           = KWORK_CLASS_SCHED,
+	.nr_tracepoints = ARRAY_SIZE(sched_tp_handlers),
+	.tp_handlers    = sched_tp_handlers,
+	.class_init     = sched_class_init,
+	.work_init      = sched_work_init,
+	.work_name      = sched_work_name,
+};
+
 static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
 	[KWORK_CLASS_IRQ]       = &kwork_irq,
 	[KWORK_CLASS_SOFTIRQ]   = &kwork_softirq,
 	[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue,
+	[KWORK_CLASS_SCHED]     = &kwork_sched,
 };

 static void print_separator(int len)
 {
 	printf(" %.*s\n", len, graph_dotted_line);
 }

 static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
 {
 	int ret = 0;
 	char kwork_name[PRINT_KWORK_NAME_WIDTH];
 	char max_runtime_start[32], max_runtime_end[32];
 	char max_latency_start[32], max_latency_end[32];

 	printf(" ");

 	/*
 	 * kwork name
 	 */
 	if (work->class && work->class->work_name) {
 		work->class->work_name(work, kwork_name,
 				       PRINT_KWORK_NAME_WIDTH);
 		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, kwork_name);
 	} else {
 		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, "");
 	}

 	/*
 	 * cpu
 	 */
 	ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);

 	/*
 	 * total runtime
 	 */
 	if (kwork->report == KWORK_REPORT_RUNTIME) {
 		ret += printf(" %*.*f ms |",
 			      PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
 			      (double)work->total_runtime / NSEC_PER_MSEC);
 	} else if (kwork->report == KWORK_REPORT_LATENCY) { // avg delay
 		ret += printf(" %*.*f ms |",
 			      PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
1381 (double)work->t 1098 (double)work->total_latency / 1382 work->nr_atoms 1099 work->nr_atoms / NSEC_PER_MSEC); 1383 } 1100 } 1384 1101 1385 /* 1102 /* 1386 * count 1103 * count 1387 */ 1104 */ 1388 ret += printf(" %*" PRIu64 " |", PRIN 1105 ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms); 1389 1106 1390 /* 1107 /* 1391 * max runtime, max runtime start, ma 1108 * max runtime, max runtime start, max runtime end 1392 */ 1109 */ 1393 if (kwork->report == KWORK_REPORT_RUN 1110 if (kwork->report == KWORK_REPORT_RUNTIME) { 1394 timestamp__scnprintf_usec(wor 1111 timestamp__scnprintf_usec(work->max_runtime_start, 1395 max 1112 max_runtime_start, 1396 siz 1113 sizeof(max_runtime_start)); 1397 timestamp__scnprintf_usec(wor 1114 timestamp__scnprintf_usec(work->max_runtime_end, 1398 max 1115 max_runtime_end, 1399 siz 1116 sizeof(max_runtime_end)); 1400 ret += printf(" %*.*f ms | %* 1117 ret += printf(" %*.*f ms | %*s s | %*s s |", 1401 PRINT_RUNTIME_W 1118 PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH, 1402 (double)work->m 1119 (double)work->max_runtime / NSEC_PER_MSEC, 1403 PRINT_TIMESTAMP 1120 PRINT_TIMESTAMP_WIDTH, max_runtime_start, 1404 PRINT_TIMESTAMP 1121 PRINT_TIMESTAMP_WIDTH, max_runtime_end); 1405 } 1122 } 1406 /* 1123 /* 1407 * max delay, max delay start, max de 1124 * max delay, max delay start, max delay end 1408 */ 1125 */ 1409 else if (kwork->report == KWORK_REPOR 1126 else if (kwork->report == KWORK_REPORT_LATENCY) { 1410 timestamp__scnprintf_usec(wor 1127 timestamp__scnprintf_usec(work->max_latency_start, 1411 max 1128 max_latency_start, 1412 siz 1129 sizeof(max_latency_start)); 1413 timestamp__scnprintf_usec(wor 1130 timestamp__scnprintf_usec(work->max_latency_end, 1414 max 1131 max_latency_end, 1415 siz 1132 sizeof(max_latency_end)); 1416 ret += printf(" %*.*f ms | %* 1133 ret += printf(" %*.*f ms | %*s s | %*s s |", 1417 PRINT_LATENCY_W 1134 PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH, 1418 (double)work->m 1135 (double)work->max_latency / NSEC_PER_MSEC, 1419 PRINT_TIMESTAMP 1136 PRINT_TIMESTAMP_WIDTH, max_latency_start, 1420 PRINT_TIMESTAMP 1137 PRINT_TIMESTAMP_WIDTH, max_latency_end); 1421 } 1138 } 1422 1139 1423 printf("\n"); 1140 printf("\n"); 1424 return ret; 1141 return ret; 1425 } 1142 } 1426 1143 1427 static int report_print_header(struct perf_kw 1144 static int report_print_header(struct perf_kwork *kwork) 1428 { 1145 { 1429 int ret; 1146 int ret; 1430 1147 1431 printf("\n "); 1148 printf("\n "); 1432 ret = printf(" %-*s | %-*s |", 1149 ret = printf(" %-*s | %-*s |", 1433 PRINT_KWORK_NAME_WIDTH, 1150 PRINT_KWORK_NAME_WIDTH, "Kwork Name", 1434 PRINT_CPU_WIDTH, "Cpu"); 1151 PRINT_CPU_WIDTH, "Cpu"); 1435 1152 1436 if (kwork->report == KWORK_REPORT_RUN 1153 if (kwork->report == KWORK_REPORT_RUNTIME) { 1437 ret += printf(" %-*s |", 1154 ret += printf(" %-*s |", 1438 PRINT_RUNTIME_H 1155 PRINT_RUNTIME_HEADER_WIDTH, "Total Runtime"); 1439 } else if (kwork->report == KWORK_REP 1156 } else if (kwork->report == KWORK_REPORT_LATENCY) { 1440 ret += printf(" %-*s |", 1157 ret += printf(" %-*s |", 1441 PRINT_LATENCY_H 1158 PRINT_LATENCY_HEADER_WIDTH, "Avg delay"); 1442 } 1159 } 1443 1160 1444 ret += printf(" %-*s |", PRINT_COUNT_ 1161 ret += printf(" %-*s |", PRINT_COUNT_WIDTH, "Count"); 1445 1162 1446 if (kwork->report == KWORK_REPORT_RUN 1163 if (kwork->report == KWORK_REPORT_RUNTIME) { 1447 ret += printf(" %-*s | %-*s | 1164 ret += printf(" %-*s | %-*s | %-*s |", 1448 PRINT_RUNTIME_H 1165 PRINT_RUNTIME_HEADER_WIDTH, "Max runtime", 1449 PRINT_TIMESTAMP 1166 
PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime start", 1450 PRINT_TIMESTAMP 1167 PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime end"); 1451 } else if (kwork->report == KWORK_REP 1168 } else if (kwork->report == KWORK_REPORT_LATENCY) { 1452 ret += printf(" %-*s | %-*s | 1169 ret += printf(" %-*s | %-*s | %-*s |", 1453 PRINT_LATENCY_H 1170 PRINT_LATENCY_HEADER_WIDTH, "Max delay", 1454 PRINT_TIMESTAMP 1171 PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay start", 1455 PRINT_TIMESTAMP 1172 PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay end"); 1456 } 1173 } 1457 1174 1458 printf("\n"); 1175 printf("\n"); 1459 print_separator(ret); 1176 print_separator(ret); 1460 return ret; 1177 return ret; 1461 } 1178 } 1462 1179 1463 static void timehist_print_header(void) 1180 static void timehist_print_header(void) 1464 { 1181 { 1465 /* 1182 /* 1466 * header row 1183 * header row 1467 */ 1184 */ 1468 printf(" %-*s %-*s %-*s %-*s %-*s 1185 printf(" %-*s %-*s %-*s %-*s %-*s %-*s\n", 1469 PRINT_TIMESTAMP_WIDTH, "Runtim 1186 PRINT_TIMESTAMP_WIDTH, "Runtime start", 1470 PRINT_TIMESTAMP_WIDTH, "Runtim 1187 PRINT_TIMESTAMP_WIDTH, "Runtime end", 1471 PRINT_TIMEHIST_CPU_WIDTH, "Cpu 1188 PRINT_TIMEHIST_CPU_WIDTH, "Cpu", 1472 PRINT_KWORK_NAME_WIDTH, "Kwork 1189 PRINT_KWORK_NAME_WIDTH, "Kwork name", 1473 PRINT_RUNTIME_WIDTH, "Runtime" 1190 PRINT_RUNTIME_WIDTH, "Runtime", 1474 PRINT_RUNTIME_WIDTH, "Delaytim 1191 PRINT_RUNTIME_WIDTH, "Delaytime"); 1475 1192 1476 /* 1193 /* 1477 * units row 1194 * units row 1478 */ 1195 */ 1479 printf(" %-*s %-*s %-*s %-*s %-*s 1196 printf(" %-*s %-*s %-*s %-*s %-*s %-*s\n", 1480 PRINT_TIMESTAMP_WIDTH, "", 1197 PRINT_TIMESTAMP_WIDTH, "", 1481 PRINT_TIMESTAMP_WIDTH, "", 1198 PRINT_TIMESTAMP_WIDTH, "", 1482 PRINT_TIMEHIST_CPU_WIDTH, "", 1199 PRINT_TIMEHIST_CPU_WIDTH, "", 1483 PRINT_KWORK_NAME_WIDTH, "(TYPE 1200 PRINT_KWORK_NAME_WIDTH, "(TYPE)NAME:NUM", 1484 PRINT_RUNTIME_WIDTH, "(msec)", 1201 PRINT_RUNTIME_WIDTH, "(msec)", 1485 PRINT_RUNTIME_WIDTH, "(msec)") 1202 PRINT_RUNTIME_WIDTH, "(msec)"); 1486 1203 1487 /* 1204 /* 1488 * separator 1205 * separator 1489 */ 1206 */ 1490 printf(" %.*s %.*s %.*s %.*s %.*s 1207 printf(" %.*s %.*s %.*s %.*s %.*s %.*s\n", 1491 PRINT_TIMESTAMP_WIDTH, graph_d 1208 PRINT_TIMESTAMP_WIDTH, graph_dotted_line, 1492 PRINT_TIMESTAMP_WIDTH, graph_d 1209 PRINT_TIMESTAMP_WIDTH, graph_dotted_line, 1493 PRINT_TIMEHIST_CPU_WIDTH, grap 1210 PRINT_TIMEHIST_CPU_WIDTH, graph_dotted_line, 1494 PRINT_KWORK_NAME_WIDTH, graph_ 1211 PRINT_KWORK_NAME_WIDTH, graph_dotted_line, 1495 PRINT_RUNTIME_WIDTH, graph_dot 1212 PRINT_RUNTIME_WIDTH, graph_dotted_line, 1496 PRINT_RUNTIME_WIDTH, graph_dot 1213 PRINT_RUNTIME_WIDTH, graph_dotted_line); 1497 } 1214 } 1498 1215 1499 static void print_summary(struct perf_kwork * 1216 static void print_summary(struct perf_kwork *kwork) 1500 { 1217 { 1501 u64 time = kwork->timeend - kwork->ti 1218 u64 time = kwork->timeend - kwork->timestart; 1502 1219 1503 printf(" Total count : %9 1220 printf(" Total count : %9" PRIu64 "\n", kwork->all_count); 1504 printf(" Total runtime (msec) : %9 1221 printf(" Total runtime (msec) : %9.3f (%.3f%% load average)\n", 1505 (double)kwork->all_runtime / N 1222 (double)kwork->all_runtime / NSEC_PER_MSEC, 1506 time == 0 ? 0 : (double)kwork- 1223 time == 0 ? 
0 : (double)kwork->all_runtime / time); 1507 printf(" Total time span (msec) : %9 1224 printf(" Total time span (msec) : %9.3f\n", 1508 (double)time / NSEC_PER_MSEC); 1225 (double)time / NSEC_PER_MSEC); 1509 } 1226 } 1510 1227 1511 static unsigned long long nr_list_entry(struc 1228 static unsigned long long nr_list_entry(struct list_head *head) 1512 { 1229 { 1513 struct list_head *pos; 1230 struct list_head *pos; 1514 unsigned long long n = 0; 1231 unsigned long long n = 0; 1515 1232 1516 list_for_each(pos, head) 1233 list_for_each(pos, head) 1517 n++; 1234 n++; 1518 1235 1519 return n; 1236 return n; 1520 } 1237 } 1521 1238 1522 static void print_skipped_events(struct perf_ 1239 static void print_skipped_events(struct perf_kwork *kwork) 1523 { 1240 { 1524 int i; 1241 int i; 1525 const char *const kwork_event_str[] = 1242 const char *const kwork_event_str[] = { 1526 [KWORK_TRACE_RAISE] = "raise" 1243 [KWORK_TRACE_RAISE] = "raise", 1527 [KWORK_TRACE_ENTRY] = "entry" 1244 [KWORK_TRACE_ENTRY] = "entry", 1528 [KWORK_TRACE_EXIT] = "exit", 1245 [KWORK_TRACE_EXIT] = "exit", 1529 }; 1246 }; 1530 1247 1531 if ((kwork->nr_skipped_events[KWORK_T 1248 if ((kwork->nr_skipped_events[KWORK_TRACE_MAX] != 0) && 1532 (kwork->nr_events != 0)) { 1249 (kwork->nr_events != 0)) { 1533 printf(" INFO: %.3f%% skippe 1250 printf(" INFO: %.3f%% skipped events (%" PRIu64 " including ", 1534 (double)kwork->nr_skip 1251 (double)kwork->nr_skipped_events[KWORK_TRACE_MAX] / 1535 (double)kwork->nr_even 1252 (double)kwork->nr_events * 100.0, 1536 kwork->nr_skipped_even 1253 kwork->nr_skipped_events[KWORK_TRACE_MAX]); 1537 1254 1538 for (i = 0; i < KWORK_TRACE_M 1255 for (i = 0; i < KWORK_TRACE_MAX; i++) { 1539 printf("%" PRIu64 " % 1256 printf("%" PRIu64 " %s%s", 1540 kwork->nr_skip 1257 kwork->nr_skipped_events[i], 1541 kwork_event_st 1258 kwork_event_str[i], 1542 (i == KWORK_TR 1259 (i == KWORK_TRACE_MAX - 1) ? ")\n" : ", "); 1543 } 1260 } 1544 } 1261 } 1545 1262 1546 if (verbose > 0) 1263 if (verbose > 0) 1547 printf(" INFO: use %lld atom 1264 printf(" INFO: use %lld atom pages\n", 1548 nr_list_entry(&kwork-> 1265 nr_list_entry(&kwork->atom_page_list)); 1549 } 1266 } 1550 1267 1551 static void print_bad_events(struct perf_kwor 1268 static void print_bad_events(struct perf_kwork *kwork) 1552 { 1269 { 1553 if ((kwork->nr_lost_events != 0) && ( 1270 if ((kwork->nr_lost_events != 0) && (kwork->nr_events != 0)) { 1554 printf(" INFO: %.3f%% lost e 1271 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n", 1555 (double)kwork->nr_lost 1272 (double)kwork->nr_lost_events / 1556 (double)kwork->nr_even 1273 (double)kwork->nr_events * 100.0, 1557 kwork->nr_lost_events, 1274 kwork->nr_lost_events, kwork->nr_events, 1558 kwork->nr_lost_chunks) 1275 kwork->nr_lost_chunks); 1559 } 1276 } 1560 } 1277 } 1561 1278 1562 const char *graph_load = "||||||||||||||||||| !! 1279 static void work_sort(struct perf_kwork *kwork, struct kwork_class *class) 1563 const char *graph_idle = " << 1564 static void top_print_per_cpu_load(struct per << 1565 { << 1566 int i, load_width; << 1567 u64 total, load, load_ratio; << 1568 struct kwork_top_stat *stat = &kwork- << 1569 << 1570 for (i = 0; i < MAX_NR_CPUS; i++) { << 1571 total = stat->cpus_runtime[i] << 1572 load = stat->cpus_runtime[i]. 
<< 1573 if (test_bit(i, stat->all_cpu << 1574 load_ratio = load * 1 << 1575 load_width = PRINT_CP << 1576 load_ratio / << 1577 << 1578 printf("%%Cpu%-*d[%.* << 1579 PRINT_CPU_WIDT << 1580 load_width, gr << 1581 PRINT_CPU_USAG << 1582 graph_idle, << 1583 PRINT_CPU_USAG << 1584 PRINT_CPU_USAG << 1585 (double)load_r << 1586 } << 1587 } << 1588 } << 1589 << 1590 static void top_print_cpu_usage(struct perf_k << 1591 { << 1592 struct kwork_top_stat *stat = &kwork- << 1593 u64 idle_time = stat->cpus_runtime[MA << 1594 u64 hardirq_time = stat->cpus_runtime << 1595 u64 softirq_time = stat->cpus_runtime << 1596 int cpus_nr = bitmap_weight(stat->all << 1597 u64 cpus_total_time = stat->cpus_runt << 1598 << 1599 printf("Total : %*.*f ms, %d cpus\n" << 1600 PRINT_RUNTIME_WIDTH, RPINT_DEC << 1601 (double)cpus_total_time / NSEC << 1602 cpus_nr); << 1603 << 1604 printf("%%Cpu(s): %*.*f%% id, %*.*f%% << 1605 PRINT_CPU_USAGE_WIDTH, PRINT_C << 1606 cpus_total_time ? (double)idle << 1607 << 1608 PRINT_CPU_USAGE_WIDTH, PRINT_C << 1609 cpus_total_time ? (double)hard << 1610 << 1611 PRINT_CPU_USAGE_WIDTH, PRINT_C << 1612 cpus_total_time ? (double)soft << 1613 << 1614 top_print_per_cpu_load(kwork); << 1615 } << 1616 << 1617 static void top_print_header(struct perf_kwor << 1618 { << 1619 int ret; << 1620 << 1621 printf("\n "); << 1622 ret = printf(" %*s %s%*s%s %*s %*s << 1623 PRINT_PID_WIDTH, "PID", << 1624 << 1625 kwork->use_bpf ? " " : " << 1626 kwork->use_bpf ? PRINT_P << 1627 kwork->use_bpf ? "SPID" << 1628 kwork->use_bpf ? " " : " << 1629 << 1630 PRINT_CPU_USAGE_WIDTH, " << 1631 PRINT_RUNTIME_HEADER_WID << 1632 PRINT_TASK_NAME_WIDTH, " << 1633 printf("\n "); << 1634 print_separator(ret); << 1635 } << 1636 << 1637 static int top_print_work(struct perf_kwork * << 1638 { << 1639 int ret = 0; << 1640 << 1641 printf(" "); << 1642 << 1643 /* << 1644 * pid << 1645 */ << 1646 ret += printf(" %*" PRIu64 " ", PRINT << 1647 << 1648 /* << 1649 * tgid << 1650 */ << 1651 if (kwork->use_bpf) << 1652 ret += printf(" %*d ", PRINT_ << 1653 << 1654 /* << 1655 * cpu usage << 1656 */ << 1657 ret += printf(" %*.*f ", << 1658 PRINT_CPU_USAGE_WIDTH, << 1659 (double)work->cpu_usage << 1660 << 1661 /* << 1662 * total runtime << 1663 */ << 1664 ret += printf(" %*.*f ms ", << 1665 PRINT_RUNTIME_WIDTH + R << 1666 (double)work->total_run << 1667 << 1668 /* << 1669 * command << 1670 */ << 1671 if (kwork->use_bpf) << 1672 ret += printf(" %s%s%s", << 1673 work->is_kthrea << 1674 work->name, << 1675 work->is_kthrea << 1676 else << 1677 ret += printf(" %-*s", PRINT_ << 1678 << 1679 printf("\n"); << 1680 return ret; << 1681 } << 1682 << 1683 static void work_sort(struct perf_kwork *kwor << 1684 struct kwork_class *cla << 1685 { 1280 { 1686 struct rb_node *node; 1281 struct rb_node *node; 1687 struct kwork_work *data; 1282 struct kwork_work *data; >> 1283 struct rb_root_cached *root = &class->work_root; 1688 1284 1689 pr_debug("Sorting %s ...\n", class->n 1285 pr_debug("Sorting %s ...\n", class->name); 1690 for (;;) { 1286 for (;;) { 1691 node = rb_first_cached(root); 1287 node = rb_first_cached(root); 1692 if (!node) 1288 if (!node) 1693 break; 1289 break; 1694 1290 1695 rb_erase_cached(node, root); 1291 rb_erase_cached(node, root); 1696 data = rb_entry(node, struct 1292 data = rb_entry(node, struct kwork_work, node); 1697 work_insert(&kwork->sorted_wo 1293 work_insert(&kwork->sorted_work_root, 1698 data, &kwork-> 1294 data, &kwork->sort_list); 1699 } 1295 } 1700 } 1296 } 1701 1297 1702 static void perf_kwork__sort(struct perf_kwor 1298 static 
void perf_kwork__sort(struct perf_kwork *kwork) 1703 { 1299 { 1704 struct kwork_class *class; 1300 struct kwork_class *class; 1705 1301 1706 list_for_each_entry(class, &kwork->cl 1302 list_for_each_entry(class, &kwork->class_list, list) 1707 work_sort(kwork, class, &clas !! 1303 work_sort(kwork, class); 1708 } 1304 } 1709 1305 1710 static int perf_kwork__check_config(struct pe 1306 static int perf_kwork__check_config(struct perf_kwork *kwork, 1711 struct pe 1307 struct perf_session *session) 1712 { 1308 { 1713 int ret; 1309 int ret; 1714 struct evsel *evsel; 1310 struct evsel *evsel; 1715 struct kwork_class *class; 1311 struct kwork_class *class; 1716 1312 1717 static struct trace_kwork_handler rep 1313 static struct trace_kwork_handler report_ops = { 1718 .entry_event = report_entry_e 1314 .entry_event = report_entry_event, 1719 .exit_event = report_exit_ev 1315 .exit_event = report_exit_event, 1720 }; 1316 }; 1721 static struct trace_kwork_handler lat 1317 static struct trace_kwork_handler latency_ops = { 1722 .raise_event = latency_raise_ 1318 .raise_event = latency_raise_event, 1723 .entry_event = latency_entry_ 1319 .entry_event = latency_entry_event, 1724 }; 1320 }; 1725 static struct trace_kwork_handler tim 1321 static struct trace_kwork_handler timehist_ops = { 1726 .raise_event = timehist_raise 1322 .raise_event = timehist_raise_event, 1727 .entry_event = timehist_entry 1323 .entry_event = timehist_entry_event, 1728 .exit_event = timehist_exit_ 1324 .exit_event = timehist_exit_event, 1729 }; 1325 }; 1730 static struct trace_kwork_handler top << 1731 .entry_event = timehis << 1732 .exit_event = top_exi << 1733 .sched_switch_event = top_sch << 1734 }; << 1735 1326 1736 switch (kwork->report) { 1327 switch (kwork->report) { 1737 case KWORK_REPORT_RUNTIME: 1328 case KWORK_REPORT_RUNTIME: 1738 kwork->tp_handler = &report_o 1329 kwork->tp_handler = &report_ops; 1739 break; 1330 break; 1740 case KWORK_REPORT_LATENCY: 1331 case KWORK_REPORT_LATENCY: 1741 kwork->tp_handler = &latency_ 1332 kwork->tp_handler = &latency_ops; 1742 break; 1333 break; 1743 case KWORK_REPORT_TIMEHIST: 1334 case KWORK_REPORT_TIMEHIST: 1744 kwork->tp_handler = &timehist 1335 kwork->tp_handler = &timehist_ops; 1745 break; 1336 break; 1746 case KWORK_REPORT_TOP: << 1747 kwork->tp_handler = &top_ops; << 1748 break; << 1749 default: 1337 default: 1750 pr_debug("Invalid report type 1338 pr_debug("Invalid report type %d\n", kwork->report); 1751 return -1; 1339 return -1; 1752 } 1340 } 1753 1341 1754 list_for_each_entry(class, &kwork->cl 1342 list_for_each_entry(class, &kwork->class_list, list) 1755 if ((class->class_init != NUL 1343 if ((class->class_init != NULL) && 1756 (class->class_init(class, 1344 (class->class_init(class, session) != 0)) 1757 return -1; 1345 return -1; 1758 1346 1759 if (kwork->cpu_list != NULL) { 1347 if (kwork->cpu_list != NULL) { 1760 ret = perf_session__cpu_bitma 1348 ret = perf_session__cpu_bitmap(session, 1761 1349 kwork->cpu_list, 1762 1350 kwork->cpu_bitmap); 1763 if (ret < 0) { 1351 if (ret < 0) { 1764 pr_err("Invalid cpu b 1352 pr_err("Invalid cpu bitmap\n"); 1765 return -1; 1353 return -1; 1766 } 1354 } 1767 } 1355 } 1768 1356 1769 if (kwork->time_str != NULL) { 1357 if (kwork->time_str != NULL) { 1770 ret = perf_time__parse_str(&k 1358 ret = perf_time__parse_str(&kwork->ptime, kwork->time_str); 1771 if (ret != 0) { 1359 if (ret != 0) { 1772 pr_err("Invalid time 1360 pr_err("Invalid time span\n"); 1773 return -1; 1361 return -1; 1774 } 1362 } 1775 } 1363 } 1776 1364 1777 
list_for_each_entry(evsel, &session-> 1365 list_for_each_entry(evsel, &session->evlist->core.entries, core.node) { 1778 if (kwork->show_callchain && 1366 if (kwork->show_callchain && !evsel__has_callchain(evsel)) { 1779 pr_debug("Samples do 1367 pr_debug("Samples do not have callchains\n"); 1780 kwork->show_callchain 1368 kwork->show_callchain = 0; 1781 symbol_conf.use_callc 1369 symbol_conf.use_callchain = 0; 1782 } 1370 } 1783 } 1371 } 1784 1372 1785 return 0; 1373 return 0; 1786 } 1374 } 1787 1375 1788 static int perf_kwork__read_events(struct per 1376 static int perf_kwork__read_events(struct perf_kwork *kwork) 1789 { 1377 { 1790 int ret = -1; 1378 int ret = -1; 1791 struct perf_session *session = NULL; 1379 struct perf_session *session = NULL; 1792 1380 1793 struct perf_data data = { 1381 struct perf_data data = { 1794 .path = input_name, 1382 .path = input_name, 1795 .mode = PERF_DATA_MODE_READ, 1383 .mode = PERF_DATA_MODE_READ, 1796 .force = kwork->force, 1384 .force = kwork->force, 1797 }; 1385 }; 1798 1386 1799 session = perf_session__new(&data, &k 1387 session = perf_session__new(&data, &kwork->tool); 1800 if (IS_ERR(session)) { 1388 if (IS_ERR(session)) { 1801 pr_debug("Error creating perf 1389 pr_debug("Error creating perf session\n"); 1802 return PTR_ERR(session); 1390 return PTR_ERR(session); 1803 } 1391 } 1804 1392 1805 symbol__init(&session->header.env); 1393 symbol__init(&session->header.env); 1806 1394 1807 if (perf_kwork__check_config(kwork, s 1395 if (perf_kwork__check_config(kwork, session) != 0) 1808 goto out_delete; 1396 goto out_delete; 1809 1397 1810 if (session->tevent.pevent && 1398 if (session->tevent.pevent && 1811 tep_set_function_resolver(session 1399 tep_set_function_resolver(session->tevent.pevent, 1812 machine 1400 machine__resolve_kernel_addr, 1813 &sessio 1401 &session->machines.host) < 0) { 1814 pr_err("Failed to set libtrac 1402 pr_err("Failed to set libtraceevent function resolver\n"); 1815 goto out_delete; 1403 goto out_delete; 1816 } 1404 } 1817 1405 1818 if (kwork->report == KWORK_REPORT_TIM 1406 if (kwork->report == KWORK_REPORT_TIMEHIST) 1819 timehist_print_header(); 1407 timehist_print_header(); 1820 1408 1821 ret = perf_session__process_events(se 1409 ret = perf_session__process_events(session); 1822 if (ret) { 1410 if (ret) { 1823 pr_debug("Failed to process e 1411 pr_debug("Failed to process events, error %d\n", ret); 1824 goto out_delete; 1412 goto out_delete; 1825 } 1413 } 1826 1414 1827 kwork->nr_events = session->evli 1415 kwork->nr_events = session->evlist->stats.nr_events[0]; 1828 kwork->nr_lost_events = session->evli 1416 kwork->nr_lost_events = session->evlist->stats.total_lost; 1829 kwork->nr_lost_chunks = session->evli 1417 kwork->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST]; 1830 1418 1831 out_delete: 1419 out_delete: 1832 perf_session__delete(session); 1420 perf_session__delete(session); 1833 return ret; 1421 return ret; 1834 } 1422 } 1835 1423 1836 static void process_skipped_events(struct per 1424 static void process_skipped_events(struct perf_kwork *kwork, 1837 struct kwo 1425 struct kwork_work *work) 1838 { 1426 { 1839 int i; 1427 int i; 1840 unsigned long long count; 1428 unsigned long long count; 1841 1429 1842 for (i = 0; i < KWORK_TRACE_MAX; i++) 1430 for (i = 0; i < KWORK_TRACE_MAX; i++) { 1843 count = nr_list_entry(&work-> 1431 count = nr_list_entry(&work->atom_list[i]); 1844 kwork->nr_skipped_events[i] + 1432 kwork->nr_skipped_events[i] += count; 1845 kwork->nr_skipped_events[KWOR 1433 
kwork->nr_skipped_events[KWORK_TRACE_MAX] += count; 1846 } 1434 } 1847 } 1435 } 1848 1436 1849 struct kwork_work *perf_kwork_add_work(struct 1437 struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork, 1850 struct 1438 struct kwork_class *class, 1851 struct 1439 struct kwork_work *key) 1852 { 1440 { 1853 struct kwork_work *work = NULL; 1441 struct kwork_work *work = NULL; 1854 1442 1855 work = work_new(key); 1443 work = work_new(key); 1856 if (work == NULL) 1444 if (work == NULL) 1857 return NULL; 1445 return NULL; 1858 1446 1859 work_insert(&class->work_root, work, 1447 work_insert(&class->work_root, work, &kwork->cmp_id); 1860 return work; 1448 return work; 1861 } 1449 } 1862 1450 1863 static void sig_handler(int sig) 1451 static void sig_handler(int sig) 1864 { 1452 { 1865 /* 1453 /* 1866 * Simply capture termination signal 1454 * Simply capture termination signal so that 1867 * the program can continue after pau 1455 * the program can continue after pause returns 1868 */ 1456 */ 1869 pr_debug("Capture signal %d\n", sig); !! 1457 pr_debug("Captuer signal %d\n", sig); 1870 } 1458 } 1871 1459 1872 static int perf_kwork__report_bpf(struct perf 1460 static int perf_kwork__report_bpf(struct perf_kwork *kwork) 1873 { 1461 { 1874 int ret; 1462 int ret; 1875 1463 1876 signal(SIGINT, sig_handler); 1464 signal(SIGINT, sig_handler); 1877 signal(SIGTERM, sig_handler); 1465 signal(SIGTERM, sig_handler); 1878 1466 1879 ret = perf_kwork__trace_prepare_bpf(k 1467 ret = perf_kwork__trace_prepare_bpf(kwork); 1880 if (ret) 1468 if (ret) 1881 return -1; 1469 return -1; 1882 1470 1883 printf("Starting trace, Hit <Ctrl+C> 1471 printf("Starting trace, Hit <Ctrl+C> to stop and report\n"); 1884 1472 1885 perf_kwork__trace_start(); 1473 perf_kwork__trace_start(); 1886 1474 1887 /* 1475 /* 1888 * a simple pause, wait here for stop 1476 * a simple pause, wait here for stop signal 1889 */ 1477 */ 1890 pause(); 1478 pause(); 1891 1479 1892 perf_kwork__trace_finish(); 1480 perf_kwork__trace_finish(); 1893 1481 1894 perf_kwork__report_read_bpf(kwork); 1482 perf_kwork__report_read_bpf(kwork); 1895 1483 1896 perf_kwork__report_cleanup_bpf(); 1484 perf_kwork__report_cleanup_bpf(); 1897 1485 1898 return 0; 1486 return 0; 1899 } 1487 } 1900 1488 1901 static int perf_kwork__report(struct perf_kwo 1489 static int perf_kwork__report(struct perf_kwork *kwork) 1902 { 1490 { 1903 int ret; 1491 int ret; 1904 struct rb_node *next; 1492 struct rb_node *next; 1905 struct kwork_work *work; 1493 struct kwork_work *work; 1906 1494 1907 if (kwork->use_bpf) 1495 if (kwork->use_bpf) 1908 ret = perf_kwork__report_bpf( 1496 ret = perf_kwork__report_bpf(kwork); 1909 else 1497 else 1910 ret = perf_kwork__read_events 1498 ret = perf_kwork__read_events(kwork); 1911 1499 1912 if (ret != 0) 1500 if (ret != 0) 1913 return -1; 1501 return -1; 1914 1502 1915 perf_kwork__sort(kwork); 1503 perf_kwork__sort(kwork); 1916 1504 1917 setup_pager(); 1505 setup_pager(); 1918 1506 1919 ret = report_print_header(kwork); 1507 ret = report_print_header(kwork); 1920 next = rb_first_cached(&kwork->sorted 1508 next = rb_first_cached(&kwork->sorted_work_root); 1921 while (next) { 1509 while (next) { 1922 work = rb_entry(next, struct 1510 work = rb_entry(next, struct kwork_work, node); 1923 process_skipped_events(kwork, 1511 process_skipped_events(kwork, work); 1924 1512 1925 if (work->nr_atoms != 0) { 1513 if (work->nr_atoms != 0) { 1926 report_print_work(kwo 1514 report_print_work(kwork, work); 1927 if (kwork->summary) { 1515 if (kwork->summary) { 1928 
kwork->all_ru 1516 kwork->all_runtime += work->total_runtime; 1929 kwork->all_co 1517 kwork->all_count += work->nr_atoms; 1930 } 1518 } 1931 } 1519 } 1932 next = rb_next(next); 1520 next = rb_next(next); 1933 } 1521 } 1934 print_separator(ret); 1522 print_separator(ret); 1935 1523 1936 if (kwork->summary) { 1524 if (kwork->summary) { 1937 print_summary(kwork); 1525 print_summary(kwork); 1938 print_separator(ret); 1526 print_separator(ret); 1939 } 1527 } 1940 1528 1941 print_bad_events(kwork); 1529 print_bad_events(kwork); 1942 print_skipped_events(kwork); 1530 print_skipped_events(kwork); 1943 printf("\n"); 1531 printf("\n"); 1944 1532 1945 return 0; 1533 return 0; 1946 } 1534 } 1947 1535 1948 typedef int (*tracepoint_handler)(struct perf 1536 typedef int (*tracepoint_handler)(struct perf_tool *tool, 1949 struct evse 1537 struct evsel *evsel, 1950 struct perf 1538 struct perf_sample *sample, 1951 struct mach 1539 struct machine *machine); 1952 1540 1953 static int perf_kwork__process_tracepoint_sam 1541 static int perf_kwork__process_tracepoint_sample(struct perf_tool *tool, 1954 1542 union perf_event *event __maybe_unused, 1955 1543 struct perf_sample *sample, 1956 1544 struct evsel *evsel, 1957 1545 struct machine *machine) 1958 { 1546 { 1959 int err = 0; 1547 int err = 0; 1960 1548 1961 if (evsel->handler != NULL) { 1549 if (evsel->handler != NULL) { 1962 tracepoint_handler f = evsel- 1550 tracepoint_handler f = evsel->handler; 1963 1551 1964 err = f(tool, evsel, sample, 1552 err = f(tool, evsel, sample, machine); 1965 } 1553 } 1966 1554 1967 return err; 1555 return err; 1968 } 1556 } 1969 1557 1970 static int perf_kwork__timehist(struct perf_k 1558 static int perf_kwork__timehist(struct perf_kwork *kwork) 1971 { 1559 { 1972 /* 1560 /* 1973 * event handlers for timehist option 1561 * event handlers for timehist option 1974 */ 1562 */ 1975 kwork->tool.comm = perf_event 1563 kwork->tool.comm = perf_event__process_comm; 1976 kwork->tool.exit = perf_event 1564 kwork->tool.exit = perf_event__process_exit; 1977 kwork->tool.fork = perf_event 1565 kwork->tool.fork = perf_event__process_fork; 1978 kwork->tool.attr = perf_event 1566 kwork->tool.attr = perf_event__process_attr; 1979 kwork->tool.tracing_data = perf_event 1567 kwork->tool.tracing_data = perf_event__process_tracing_data; 1980 kwork->tool.build_id = perf_event 1568 kwork->tool.build_id = perf_event__process_build_id; 1981 kwork->tool.ordered_events = true; 1569 kwork->tool.ordered_events = true; 1982 kwork->tool.ordering_requires_timesta 1570 kwork->tool.ordering_requires_timestamps = true; 1983 symbol_conf.use_callchain = kwork->sh 1571 symbol_conf.use_callchain = kwork->show_callchain; 1984 1572 1985 if (symbol__validate_sym_arguments()) 1573 if (symbol__validate_sym_arguments()) { 1986 pr_err("Failed to validate sy 1574 pr_err("Failed to validate sym arguments\n"); 1987 return -1; 1575 return -1; 1988 } 1576 } 1989 1577 1990 setup_pager(); 1578 setup_pager(); 1991 1579 1992 return perf_kwork__read_events(kwork) 1580 return perf_kwork__read_events(kwork); 1993 } 1581 } 1994 1582 1995 static void top_calc_total_runtime(struct per << 1996 { << 1997 struct kwork_class *class; << 1998 struct kwork_work *work; << 1999 struct rb_node *next; << 2000 struct kwork_top_stat *stat = &kwork- << 2001 << 2002 class = get_kwork_class(kwork, KWORK_ << 2003 if (!class) << 2004 return; << 2005 << 2006 next = rb_first_cached(&class->work_r << 2007 while (next) { << 2008 work = rb_entry(next, struct << 2009 BUG_ON(work->cpu >= MAX_NR_CP << 2010 
stat->cpus_runtime[work->cpu] << 2011 stat->cpus_runtime[MAX_NR_CPU << 2012 next = rb_next(next); << 2013 } << 2014 } << 2015 << 2016 static void top_calc_idle_time(struct perf_kw << 2017 struct kwork_ << 2018 { << 2019 struct kwork_top_stat *stat = &kwork- << 2020 << 2021 if (work->id == 0) { << 2022 stat->cpus_runtime[work->cpu] << 2023 stat->cpus_runtime[MAX_NR_CPU << 2024 } << 2025 } << 2026 << 2027 static void top_calc_irq_runtime(struct perf_ << 2028 enum kwork_c << 2029 struct kwork << 2030 { << 2031 struct kwork_top_stat *stat = &kwork- << 2032 << 2033 if (type == KWORK_CLASS_IRQ) { << 2034 stat->cpus_runtime[work->cpu] << 2035 stat->cpus_runtime[MAX_NR_CPU << 2036 } else if (type == KWORK_CLASS_SOFTIR << 2037 stat->cpus_runtime[work->cpu] << 2038 stat->cpus_runtime[MAX_NR_CPU << 2039 } << 2040 } << 2041 << 2042 static void top_subtract_irq_runtime(struct p << 2043 struct k << 2044 { << 2045 struct kwork_class *class; << 2046 struct kwork_work *data; << 2047 unsigned int i; << 2048 int irq_class_list[] = {KWORK_CLASS_I << 2049 << 2050 for (i = 0; i < ARRAY_SIZE(irq_class_ << 2051 class = get_kwork_class(kwork << 2052 if (!class) << 2053 continue; << 2054 << 2055 data = find_work_by_id(&class << 2056 work-> << 2057 if (!data) << 2058 continue; << 2059 << 2060 if (work->total_runtime > dat << 2061 work->total_runtime - << 2062 top_calc_irq_runtime( << 2063 } << 2064 } << 2065 } << 2066 << 2067 static void top_calc_cpu_usage(struct perf_kw << 2068 { << 2069 struct kwork_class *class; << 2070 struct kwork_work *work; << 2071 struct rb_node *next; << 2072 struct kwork_top_stat *stat = &kwork- << 2073 << 2074 class = get_kwork_class(kwork, KWORK_ << 2075 if (!class) << 2076 return; << 2077 << 2078 next = rb_first_cached(&class->work_r << 2079 while (next) { << 2080 work = rb_entry(next, struct << 2081 << 2082 if (work->total_runtime == 0) << 2083 goto next; << 2084 << 2085 __set_bit(work->cpu, stat->al << 2086 << 2087 top_subtract_irq_runtime(kwor << 2088 << 2089 work->cpu_usage = work->total << 2090 stat->cpus_runtime[wo << 2091 << 2092 top_calc_idle_time(kwork, wor << 2093 next: << 2094 next = rb_next(next); << 2095 } << 2096 } << 2097 << 2098 static void top_calc_load_runtime(struct perf << 2099 struct kwor << 2100 { << 2101 struct kwork_top_stat *stat = &kwork- << 2102 << 2103 if (work->id != 0) { << 2104 stat->cpus_runtime[work->cpu] << 2105 stat->cpus_runtime[MAX_NR_CPU << 2106 } << 2107 } << 2108 << 2109 static void top_merge_tasks(struct perf_kwork << 2110 { << 2111 struct kwork_work *merged_work, *data << 2112 struct kwork_class *class; << 2113 struct rb_node *node; << 2114 int cpu; << 2115 struct rb_root_cached merged_root = R << 2116 << 2117 class = get_kwork_class(kwork, KWORK_ << 2118 if (!class) << 2119 return; << 2120 << 2121 for (;;) { << 2122 node = rb_first_cached(&class << 2123 if (!node) << 2124 break; << 2125 << 2126 rb_erase_cached(node, &class- << 2127 data = rb_entry(node, struct << 2128 << 2129 if (!profile_name_match(kwork << 2130 continue; << 2131 << 2132 cpu = data->cpu; << 2133 merged_work = find_work_by_id << 2134 << 2135 if (!merged_work) { << 2136 work_insert(&merged_r << 2137 } else { << 2138 merged_work->total_ru << 2139 merged_work->cpu_usag << 2140 } << 2141 << 2142 top_calc_load_runtime(kwork, << 2143 } << 2144 << 2145 work_sort(kwork, class, &merged_root) << 2146 } << 2147 << 2148 static void perf_kwork__top_report(struct per << 2149 { << 2150 struct kwork_work *work; << 2151 struct rb_node *next; << 2152 << 2153 printf("\n"); << 2154 << 2155 
top_print_cpu_usage(kwork); << 2156 top_print_header(kwork); << 2157 next = rb_first_cached(&kwork->sorted << 2158 while (next) { << 2159 work = rb_entry(next, struct << 2160 process_skipped_events(kwork, << 2161 << 2162 if (work->total_runtime == 0) << 2163 goto next; << 2164 << 2165 top_print_work(kwork, work); << 2166 << 2167 next: << 2168 next = rb_next(next); << 2169 } << 2170 << 2171 printf("\n"); << 2172 } << 2173 << 2174 static int perf_kwork__top_bpf(struct perf_kw << 2175 { << 2176 int ret; << 2177 << 2178 signal(SIGINT, sig_handler); << 2179 signal(SIGTERM, sig_handler); << 2180 << 2181 ret = perf_kwork__top_prepare_bpf(kwo << 2182 if (ret) << 2183 return -1; << 2184 << 2185 printf("Starting trace, Hit <Ctrl+C> << 2186 << 2187 perf_kwork__top_start(); << 2188 << 2189 /* << 2190 * a simple pause, wait here for stop << 2191 */ << 2192 pause(); << 2193 << 2194 perf_kwork__top_finish(); << 2195 << 2196 perf_kwork__top_read_bpf(kwork); << 2197 << 2198 perf_kwork__top_cleanup_bpf(); << 2199 << 2200 return 0; << 2201 << 2202 } << 2203 << 2204 static int perf_kwork__top(struct perf_kwork << 2205 { << 2206 struct __top_cpus_runtime *cpus_runti << 2207 int ret = 0; << 2208 << 2209 cpus_runtime = zalloc(sizeof(struct _ << 2210 if (!cpus_runtime) << 2211 return -1; << 2212 << 2213 kwork->top_stat.cpus_runtime = cpus_r << 2214 bitmap_zero(kwork->top_stat.all_cpus_ << 2215 << 2216 if (kwork->use_bpf) << 2217 ret = perf_kwork__top_bpf(kwo << 2218 else << 2219 ret = perf_kwork__read_events << 2220 << 2221 if (ret) << 2222 goto out; << 2223 << 2224 top_calc_total_runtime(kwork); << 2225 top_calc_cpu_usage(kwork); << 2226 top_merge_tasks(kwork); << 2227 << 2228 setup_pager(); << 2229 << 2230 perf_kwork__top_report(kwork); << 2231 << 2232 out: << 2233 zfree(&kwork->top_stat.cpus_runtime); << 2234 return ret; << 2235 } << 2236 << 2237 static void setup_event_list(struct perf_kwor 1583 static void setup_event_list(struct perf_kwork *kwork, 2238 const struct opt 1584 const struct option *options, 2239 const char * con 1585 const char * const usage_msg[]) 2240 { 1586 { 2241 int i; 1587 int i; 2242 struct kwork_class *class; 1588 struct kwork_class *class; 2243 char *tmp, *tok, *str; 1589 char *tmp, *tok, *str; 2244 1590 2245 /* << 2246 * set default events list if not spe << 2247 */ << 2248 if (kwork->event_list_str == NULL) 1591 if (kwork->event_list_str == NULL) 2249 kwork->event_list_str = "irq, !! 
1592 goto null_event_list_str; 2250 1593 2251 str = strdup(kwork->event_list_str); 1594 str = strdup(kwork->event_list_str); 2252 for (tok = strtok_r(str, ", ", &tmp); 1595 for (tok = strtok_r(str, ", ", &tmp); 2253 tok; tok = strtok_r(NULL, ", ", 1596 tok; tok = strtok_r(NULL, ", ", &tmp)) { 2254 for (i = 0; i < KWORK_CLASS_M 1597 for (i = 0; i < KWORK_CLASS_MAX; i++) { 2255 class = kwork_class_s 1598 class = kwork_class_supported_list[i]; 2256 if (strcmp(tok, class 1599 if (strcmp(tok, class->name) == 0) { 2257 list_add_tail 1600 list_add_tail(&class->list, &kwork->class_list); 2258 break; 1601 break; 2259 } 1602 } 2260 } 1603 } 2261 if (i == KWORK_CLASS_MAX) { 1604 if (i == KWORK_CLASS_MAX) { 2262 usage_with_options_ms 1605 usage_with_options_msg(usage_msg, options, 2263 1606 "Unknown --event key: `%s'", tok); 2264 } 1607 } 2265 } 1608 } 2266 free(str); 1609 free(str); 2267 1610 >> 1611 null_event_list_str: >> 1612 /* >> 1613 * config all kwork events if not specified >> 1614 */ >> 1615 if (list_empty(&kwork->class_list)) { >> 1616 for (i = 0; i < KWORK_CLASS_MAX; i++) { >> 1617 list_add_tail(&kwork_class_supported_list[i]->list, >> 1618 &kwork->class_list); >> 1619 } >> 1620 } >> 1621 2268 pr_debug("Config event list:"); 1622 pr_debug("Config event list:"); 2269 list_for_each_entry(class, &kwork->cl 1623 list_for_each_entry(class, &kwork->class_list, list) 2270 pr_debug(" %s", class->name); 1624 pr_debug(" %s", class->name); 2271 pr_debug("\n"); 1625 pr_debug("\n"); 2272 } 1626 } 2273 1627 2274 static int perf_kwork__record(struct perf_kwo 1628 static int perf_kwork__record(struct perf_kwork *kwork, 2275 int argc, const 1629 int argc, const char **argv) 2276 { 1630 { 2277 const char **rec_argv; 1631 const char **rec_argv; 2278 unsigned int rec_argc, i, j; 1632 unsigned int rec_argc, i, j; 2279 struct kwork_class *class; 1633 struct kwork_class *class; 2280 1634 2281 const char *const record_args[] = { 1635 const char *const record_args[] = { 2282 "record", 1636 "record", 2283 "-a", 1637 "-a", 2284 "-R", 1638 "-R", 2285 "-m", "1024", 1639 "-m", "1024", 2286 "-c", "1", 1640 "-c", "1", 2287 }; 1641 }; 2288 1642 2289 rec_argc = ARRAY_SIZE(record_args) + 1643 rec_argc = ARRAY_SIZE(record_args) + argc - 1; 2290 1644 2291 list_for_each_entry(class, &kwork->cl 1645 list_for_each_entry(class, &kwork->class_list, list) 2292 rec_argc += 2 * class->nr_tra 1646 rec_argc += 2 * class->nr_tracepoints; 2293 1647 2294 rec_argv = calloc(rec_argc + 1, sizeo 1648 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 2295 if (rec_argv == NULL) 1649 if (rec_argv == NULL) 2296 return -ENOMEM; 1650 return -ENOMEM; 2297 1651 2298 for (i = 0; i < ARRAY_SIZE(record_arg 1652 for (i = 0; i < ARRAY_SIZE(record_args); i++) 2299 rec_argv[i] = strdup(record_a 1653 rec_argv[i] = strdup(record_args[i]); 2300 1654 2301 list_for_each_entry(class, &kwork->cl 1655 list_for_each_entry(class, &kwork->class_list, list) { 2302 for (j = 0; j < class->nr_tra 1656 for (j = 0; j < class->nr_tracepoints; j++) { 2303 rec_argv[i++] = strdu 1657 rec_argv[i++] = strdup("-e"); 2304 rec_argv[i++] = strdu 1658 rec_argv[i++] = strdup(class->tp_handlers[j].name); 2305 } 1659 } 2306 } 1660 } 2307 1661 2308 for (j = 1; j < (unsigned int)argc; j 1662 for (j = 1; j < (unsigned int)argc; j++, i++) 2309 rec_argv[i] = argv[j]; 1663 rec_argv[i] = argv[j]; 2310 1664 2311 BUG_ON(i != rec_argc); 1665 BUG_ON(i != rec_argc); 2312 1666 2313 pr_debug("record comm: "); 1667 pr_debug("record comm: "); 2314 for (j = 0; j < rec_argc; j++) 1668 for (j = 0; j < 
rec_argc; j++) 2315 pr_debug("%s ", rec_argv[j]); 1669 pr_debug("%s ", rec_argv[j]); 2316 pr_debug("\n"); 1670 pr_debug("\n"); 2317 1671 2318 return cmd_record(i, rec_argv); 1672 return cmd_record(i, rec_argv); 2319 } 1673 } 2320 1674 2321 int cmd_kwork(int argc, const char **argv) 1675 int cmd_kwork(int argc, const char **argv) 2322 { 1676 { 2323 static struct perf_kwork kwork = { 1677 static struct perf_kwork kwork = { 2324 .class_list = LIST_H 1678 .class_list = LIST_HEAD_INIT(kwork.class_list), 2325 .tool = { 1679 .tool = { 2326 .mmap = per !! 1680 .mmap = perf_event__process_mmap, 2327 .mmap2 = per !! 1681 .mmap2 = perf_event__process_mmap2, 2328 .sample = per !! 1682 .sample = perf_kwork__process_tracepoint_sample, 2329 .ordered_events = tru << 2330 }, 1683 }, 2331 .atom_page_list = LIST_H 1684 .atom_page_list = LIST_HEAD_INIT(kwork.atom_page_list), 2332 .sort_list = LIST_H 1685 .sort_list = LIST_HEAD_INIT(kwork.sort_list), 2333 .cmp_id = LIST_H 1686 .cmp_id = LIST_HEAD_INIT(kwork.cmp_id), 2334 .sorted_work_root = RB_ROO 1687 .sorted_work_root = RB_ROOT_CACHED, 2335 .tp_handler = NULL, 1688 .tp_handler = NULL, 2336 .profile_name = NULL, 1689 .profile_name = NULL, 2337 .cpu_list = NULL, 1690 .cpu_list = NULL, 2338 .time_str = NULL, 1691 .time_str = NULL, 2339 .force = false, 1692 .force = false, 2340 .event_list_str = NULL, 1693 .event_list_str = NULL, 2341 .summary = false, 1694 .summary = false, 2342 .sort_order = NULL, 1695 .sort_order = NULL, 2343 .show_callchain = false, 1696 .show_callchain = false, 2344 .max_stack = 5, 1697 .max_stack = 5, 2345 .timestart = 0, 1698 .timestart = 0, 2346 .timeend = 0, 1699 .timeend = 0, 2347 .nr_events = 0, 1700 .nr_events = 0, 2348 .nr_lost_chunks = 0, 1701 .nr_lost_chunks = 0, 2349 .nr_lost_events = 0, 1702 .nr_lost_events = 0, 2350 .all_runtime = 0, 1703 .all_runtime = 0, 2351 .all_count = 0, 1704 .all_count = 0, 2352 .nr_skipped_events = { 0 }, 1705 .nr_skipped_events = { 0 }, 2353 }; 1706 }; 2354 static const char default_report_sort 1707 static const char default_report_sort_order[] = "runtime, max, count"; 2355 static const char default_latency_sor 1708 static const char default_latency_sort_order[] = "avg, max, count"; 2356 static const char default_top_sort_or << 2357 const struct option kwork_options[] = 1709 const struct option kwork_options[] = { 2358 OPT_INCR('v', "verbose", &verbose, 1710 OPT_INCR('v', "verbose", &verbose, 2359 "be more verbose (show symbo 1711 "be more verbose (show symbol address, etc)"), 2360 OPT_BOOLEAN('D', "dump-raw-trace", &d 1712 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, 2361 "dump raw trace in ASCII" 1713 "dump raw trace in ASCII"), 2362 OPT_STRING('k', "kwork", &kwork.event 1714 OPT_STRING('k', "kwork", &kwork.event_list_str, "kwork", 2363 "list of kwork to profile !! 
1715 "list of kwork to profile (irq, softirq, workqueue, etc)"), 2364 OPT_BOOLEAN('f', "force", &kwork.forc 1716 OPT_BOOLEAN('f', "force", &kwork.force, "don't complain, do it"), 2365 OPT_END() 1717 OPT_END() 2366 }; 1718 }; 2367 const struct option report_options[] 1719 const struct option report_options[] = { 2368 OPT_STRING('s', "sort", &kwork.sort_o 1720 OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]", 2369 "sort by key(s): runtime, 1721 "sort by key(s): runtime, max, count"), 2370 OPT_STRING('C', "cpu", &kwork.cpu_lis 1722 OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu", 2371 "list of cpus to profile") 1723 "list of cpus to profile"), 2372 OPT_STRING('n', "name", &kwork.profil 1724 OPT_STRING('n', "name", &kwork.profile_name, "name", 2373 "event name to profile"), 1725 "event name to profile"), 2374 OPT_STRING(0, "time", &kwork.time_str 1726 OPT_STRING(0, "time", &kwork.time_str, "str", 2375 "Time span for analysis (s 1727 "Time span for analysis (start,stop)"), 2376 OPT_STRING('i', "input", &input_name, 1728 OPT_STRING('i', "input", &input_name, "file", 2377 "input file name"), 1729 "input file name"), 2378 OPT_BOOLEAN('S', "with-summary", &kwo 1730 OPT_BOOLEAN('S', "with-summary", &kwork.summary, 2379 "Show summary with statis 1731 "Show summary with statistics"), 2380 #ifdef HAVE_BPF_SKEL 1732 #ifdef HAVE_BPF_SKEL 2381 OPT_BOOLEAN('b', "use-bpf", &kwork.us 1733 OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf, 2382 "Use BPF to measure kwork 1734 "Use BPF to measure kwork runtime"), 2383 #endif 1735 #endif 2384 OPT_PARENT(kwork_options) 1736 OPT_PARENT(kwork_options) 2385 }; 1737 }; 2386 const struct option latency_options[] 1738 const struct option latency_options[] = { 2387 OPT_STRING('s', "sort", &kwork.sort_o 1739 OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]", 2388 "sort by key(s): avg, max, 1740 "sort by key(s): avg, max, count"), 2389 OPT_STRING('C', "cpu", &kwork.cpu_lis 1741 OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu", 2390 "list of cpus to profile") 1742 "list of cpus to profile"), 2391 OPT_STRING('n', "name", &kwork.profil 1743 OPT_STRING('n', "name", &kwork.profile_name, "name", 2392 "event name to profile"), 1744 "event name to profile"), 2393 OPT_STRING(0, "time", &kwork.time_str 1745 OPT_STRING(0, "time", &kwork.time_str, "str", 2394 "Time span for analysis (s 1746 "Time span for analysis (start,stop)"), 2395 OPT_STRING('i', "input", &input_name, 1747 OPT_STRING('i', "input", &input_name, "file", 2396 "input file name"), 1748 "input file name"), 2397 #ifdef HAVE_BPF_SKEL 1749 #ifdef HAVE_BPF_SKEL 2398 OPT_BOOLEAN('b', "use-bpf", &kwork.us 1750 OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf, 2399 "Use BPF to measure kwork 1751 "Use BPF to measure kwork latency"), 2400 #endif 1752 #endif 2401 OPT_PARENT(kwork_options) 1753 OPT_PARENT(kwork_options) 2402 }; 1754 }; 2403 const struct option timehist_options[ 1755 const struct option timehist_options[] = { 2404 OPT_STRING('k', "vmlinux", &symbol_co 1756 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, 2405 "file", "vmlinux pathname" 1757 "file", "vmlinux pathname"), 2406 OPT_STRING(0, "kallsyms", &symbol_con 1758 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, 2407 "file", "kallsyms pathname 1759 "file", "kallsyms pathname"), 2408 OPT_BOOLEAN('g', "call-graph", &kwork 1760 OPT_BOOLEAN('g', "call-graph", &kwork.show_callchain, 2409 "Display call chains if p 1761 "Display call chains if present"), 2410 OPT_UINTEGER(0, "max-stack", &kwork.m 1762 OPT_UINTEGER(0, "max-stack", &kwork.max_stack, 2411 
"Maximum number of functio 1763 "Maximum number of functions to display backtrace."), 2412 OPT_STRING(0, "symfs", &symbol_conf.s 1764 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 2413 "Look for files with symb 1765 "Look for files with symbols relative to this directory"), 2414 OPT_STRING(0, "time", &kwork.time_str 1766 OPT_STRING(0, "time", &kwork.time_str, "str", 2415 "Time span for analysis (s 1767 "Time span for analysis (start,stop)"), 2416 OPT_STRING('C', "cpu", &kwork.cpu_lis 1768 OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu", 2417 "list of cpus to profile") 1769 "list of cpus to profile"), 2418 OPT_STRING('n', "name", &kwork.profil 1770 OPT_STRING('n', "name", &kwork.profile_name, "name", 2419 "event name to profile"), 1771 "event name to profile"), 2420 OPT_STRING('i', "input", &input_name, 1772 OPT_STRING('i', "input", &input_name, "file", 2421 "input file name"), 1773 "input file name"), 2422 OPT_PARENT(kwork_options) 1774 OPT_PARENT(kwork_options) 2423 }; 1775 }; 2424 const struct option top_options[] = { << 2425 OPT_STRING('s', "sort", &kwork.sort_o << 2426 "sort by key(s): rate, run << 2427 OPT_STRING('C', "cpu", &kwork.cpu_lis << 2428 "list of cpus to profile") << 2429 OPT_STRING('n', "name", &kwork.profil << 2430 "event name to profile"), << 2431 OPT_STRING(0, "time", &kwork.time_str << 2432 "Time span for analysis (s << 2433 OPT_STRING('i', "input", &input_name, << 2434 "input file name"), << 2435 #ifdef HAVE_BPF_SKEL << 2436 OPT_BOOLEAN('b', "use-bpf", &kwork.us << 2437 "Use BPF to measure task << 2438 #endif << 2439 OPT_PARENT(kwork_options) << 2440 }; << 2441 const char *kwork_usage[] = { 1776 const char *kwork_usage[] = { 2442 NULL, 1777 NULL, 2443 NULL 1778 NULL 2444 }; 1779 }; 2445 const char * const report_usage[] = { 1780 const char * const report_usage[] = { 2446 "perf kwork report [<options> 1781 "perf kwork report [<options>]", 2447 NULL 1782 NULL 2448 }; 1783 }; 2449 const char * const latency_usage[] = 1784 const char * const latency_usage[] = { 2450 "perf kwork latency [<options 1785 "perf kwork latency [<options>]", 2451 NULL 1786 NULL 2452 }; 1787 }; 2453 const char * const timehist_usage[] = 1788 const char * const timehist_usage[] = { 2454 "perf kwork timehist [<option 1789 "perf kwork timehist [<options>]", 2455 NULL 1790 NULL 2456 }; 1791 }; 2457 const char * const top_usage[] = { << 2458 "perf kwork top [<options>]", << 2459 NULL << 2460 }; << 2461 const char *const kwork_subcommands[] 1792 const char *const kwork_subcommands[] = { 2462 "record", "report", "latency" !! 1793 "record", "report", "latency", "timehist", NULL 2463 }; 1794 }; 2464 1795 2465 argc = parse_options_subcommand(argc, 1796 argc = parse_options_subcommand(argc, argv, kwork_options, 2466 kwork 1797 kwork_subcommands, kwork_usage, 2467 PARSE 1798 PARSE_OPT_STOP_AT_NON_OPTION); 2468 if (!argc) 1799 if (!argc) 2469 usage_with_options(kwork_usag 1800 usage_with_options(kwork_usage, kwork_options); 2470 1801 >> 1802 setup_event_list(&kwork, kwork_options, kwork_usage); 2471 sort_dimension__add(&kwork, "id", &kw 1803 sort_dimension__add(&kwork, "id", &kwork.cmp_id); 2472 1804 2473 if (strlen(argv[0]) > 2 && strstarts( !! 1805 if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) 2474 setup_event_list(&kwork, kwor << 2475 return perf_kwork__record(&kw 1806 return perf_kwork__record(&kwork, argc, argv); 2476 } else if (strlen(argv[0]) > 2 && str !! 
1807 else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) { 2477 kwork.sort_order = default_re 1808 kwork.sort_order = default_report_sort_order; 2478 if (argc > 1) { 1809 if (argc > 1) { 2479 argc = parse_options( 1810 argc = parse_options(argc, argv, report_options, report_usage, 0); 2480 if (argc) 1811 if (argc) 2481 usage_with_op 1812 usage_with_options(report_usage, report_options); 2482 } 1813 } 2483 kwork.report = KWORK_REPORT_R 1814 kwork.report = KWORK_REPORT_RUNTIME; 2484 setup_sorting(&kwork, report_ 1815 setup_sorting(&kwork, report_options, report_usage); 2485 setup_event_list(&kwork, kwor << 2486 return perf_kwork__report(&kw 1816 return perf_kwork__report(&kwork); 2487 } else if (strlen(argv[0]) > 2 && str 1817 } else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) { 2488 kwork.sort_order = default_la 1818 kwork.sort_order = default_latency_sort_order; 2489 if (argc > 1) { 1819 if (argc > 1) { 2490 argc = parse_options( 1820 argc = parse_options(argc, argv, latency_options, latency_usage, 0); 2491 if (argc) 1821 if (argc) 2492 usage_with_op 1822 usage_with_options(latency_usage, latency_options); 2493 } 1823 } 2494 kwork.report = KWORK_REPORT_L 1824 kwork.report = KWORK_REPORT_LATENCY; 2495 setup_sorting(&kwork, latency 1825 setup_sorting(&kwork, latency_options, latency_usage); 2496 setup_event_list(&kwork, kwor << 2497 return perf_kwork__report(&kw 1826 return perf_kwork__report(&kwork); 2498 } else if (strlen(argv[0]) > 2 && str 1827 } else if (strlen(argv[0]) > 2 && strstarts("timehist", argv[0])) { 2499 if (argc > 1) { 1828 if (argc > 1) { 2500 argc = parse_options( 1829 argc = parse_options(argc, argv, timehist_options, timehist_usage, 0); 2501 if (argc) 1830 if (argc) 2502 usage_with_op 1831 usage_with_options(timehist_usage, timehist_options); 2503 } 1832 } 2504 kwork.report = KWORK_REPORT_T 1833 kwork.report = KWORK_REPORT_TIMEHIST; 2505 setup_event_list(&kwork, kwor << 2506 return perf_kwork__timehist(& 1834 return perf_kwork__timehist(&kwork); 2507 } else if (strlen(argv[0]) > 2 && str << 2508 kwork.sort_order = default_to << 2509 if (argc > 1) { << 2510 argc = parse_options( << 2511 if (argc) << 2512 usage_with_op << 2513 } << 2514 kwork.report = KWORK_REPORT_T << 2515 if (!kwork.event_list_str) << 2516 kwork.event_list_str << 2517 setup_event_list(&kwork, kwor << 2518 setup_sorting(&kwork, top_opt << 2519 return perf_kwork__top(&kwork << 2520 } else 1835 } else 2521 usage_with_options(kwork_usag 1836 usage_with_options(kwork_usage, kwork_options); 2522 1837 2523 return 0; 1838 return 0; 2524 } 1839 } 2525 1840
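
The report printers above line their columns up by feeding the PRINT_*_WIDTH macros into printf's "%*.*f" width/precision form, and size the dotted separator from printf's accumulated return value (see report_print_header() and print_separator()). A minimal, self-contained sketch of that technique; the macro values and the dot string below are illustrative stand-ins, not the real graph_dotted_line:

#include <stdio.h>

#define WIDTH_RUNTIME 10   /* stand-in for PRINT_RUNTIME_WIDTH */
#define WIDTH_DECIMAL 3    /* stand-in for RPINT_DECIMAL_WIDTH */
#define NSEC_PER_MSEC 1000000ULL

int main(void)
{
        unsigned long long total_runtime = 1234567ULL;  /* nanoseconds */

        /* "%*.*f" takes width and precision as arguments, like the report */
        int ret = printf(" %*.*f ms |",
                         WIDTH_RUNTIME, WIDTH_DECIMAL,
                         (double)total_runtime / NSEC_PER_MSEC);
        printf("\n");

        /* separator sized from what was actually printed, like print_separator() */
        printf(" %.*s\n", ret, "........................................");
        return 0;
}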
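
work_sort() drains each class's rb-tree with rb_first_cached()/rb_erase_cached() and re-inserts every node into kwork->sorted_work_root, ordered by the chain of comparators that setup_sorting() registered. The rb-tree side is kernel-internal, so this sketch substitutes plain qsort() to show only the comparator-chain idea; struct item and both comparators are illustrative, not perf code:

#include <stdio.h>
#include <stdlib.h>

struct item {
        unsigned long long runtime;
        unsigned long long count;
};

typedef int (*item_cmp_t)(const struct item *l, const struct item *r);

static int runtime_cmp(const struct item *l, const struct item *r)
{
        if (l->runtime > r->runtime)
                return 1;
        if (l->runtime < r->runtime)
                return -1;
        return 0;
}

static int count_cmp(const struct item *l, const struct item *r)
{
        if (l->count > r->count)
                return 1;
        if (l->count < r->count)
                return -1;
        return 0;
}

/* keys tried in order; the first comparator that breaks the tie wins */
static item_cmp_t sort_keys[] = { runtime_cmp, count_cmp };

static int multi_cmp(const void *a, const void *b)
{
        const struct item *l = a, *r = b;
        size_t i;

        for (i = 0; i < sizeof(sort_keys) / sizeof(sort_keys[0]); i++) {
                int ret = sort_keys[i](l, r);

                if (ret)
                        return -ret;    /* negate for a descending report */
        }
        return 0;
}

int main(void)
{
        struct item items[] = { { 10, 3 }, { 10, 7 }, { 25, 1 } };
        size_t i;

        qsort(items, 3, sizeof(items[0]), multi_cmp);
        for (i = 0; i < 3; i++)
                printf("runtime=%llu count=%llu\n",
                       items[i].runtime, items[i].count);
        return 0;
}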
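
perf_kwork__process_tracepoint_sample() is a thin trampoline: perf_session__set_tracepoints_handlers() stores one function pointer per tracepoint in evsel->handler, and the sample callback simply fetches and calls it. A stand-in sketch of that dispatch; struct evsel_stub and struct sample here are assumed stubs, not the real perf types:

#include <stdio.h>

struct sample {
        int cpu;
};

typedef int (*tracepoint_handler)(const struct sample *sample);

/* stand-in for the evsel: an event name plus its installed handler */
struct evsel_stub {
        const char *name;
        tracepoint_handler handler;
};

static int handle_entry(const struct sample *sample)
{
        printf("entry event on cpu %d\n", sample->cpu);
        return 0;
}

/* mirrors the trampoline: look up the handler, call it if present */
static int process_sample(struct evsel_stub *evsel, const struct sample *sample)
{
        int err = 0;

        if (evsel->handler != NULL) {
                tracepoint_handler f = evsel->handler;

                err = f(sample);
        }
        return err;
}

int main(void)
{
        struct evsel_stub evsel = { "irq:irq_handler_entry", handle_entry };
        struct sample sample = { .cpu = 2 };

        return process_sample(&evsel, &sample);
}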
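
setup_event_list() duplicates the -k/--kwork option string, splits it on commas and spaces with strtok_r(), and matches each token against kwork_class_supported_list, rejecting unknown keys (and configuring every supported class when none were named). A runnable sketch of just the parsing loop; the supported[] table is a stand-in:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *supported[] = { "irq", "softirq", "workqueue" };

int main(void)
{
        const char *event_list_str = "irq, workqueue";   /* as from -k */
        char *str = strdup(event_list_str);              /* strtok_r writes into it */
        char *tmp, *tok;
        size_t i, n = sizeof(supported) / sizeof(supported[0]);

        for (tok = strtok_r(str, ", ", &tmp);
             tok; tok = strtok_r(NULL, ", ", &tmp)) {
                for (i = 0; i < n; i++) {
                        if (strcmp(tok, supported[i]) == 0) {
                                printf("enable class %s\n", supported[i]);
                                break;
                        }
                }
                if (i == n)
                        fprintf(stderr, "Unknown --event key: `%s'\n", tok);
        }
        free(str);
        return 0;
}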
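
perf_kwork__record() hands control to cmd_record() with a synthesized argv: the fixed record_args, then one "-e <tracepoint>" pair for every tracepoint of every selected class, then the user's trailing arguments. A sketch that assembles and prints such a vector under those assumptions; the tracepoint names are examples only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        const char *record_args[] = { "record", "-a", "-R", "-m", "1024", "-c", "1" };
        const char *tracepoints[] = { "irq:irq_handler_entry", "irq:irq_handler_exit" };
        size_t nr_rec = sizeof(record_args) / sizeof(record_args[0]);
        size_t nr_tp = sizeof(tracepoints) / sizeof(tracepoints[0]);
        size_t rec_argc = nr_rec + 2 * nr_tp, i, j;
        const char **rec_argv = calloc(rec_argc + 1, sizeof(char *));

        if (rec_argv == NULL)
                return 1;

        for (i = 0; i < nr_rec; i++)
                rec_argv[i] = strdup(record_args[i]);

        /* one "-e <tracepoint>" pair per tracepoint, as in the real loop */
        for (j = 0; j < nr_tp; j++) {
                rec_argv[i++] = strdup("-e");
                rec_argv[i++] = strdup(tracepoints[j]);
        }

        /* the real code hands this to cmd_record(); just print it here */
        for (j = 0; j < rec_argc; j++)
                printf("%s ", rec_argv[j]);
        printf("\n");

        for (j = 0; j < rec_argc; j++)
                free((void *)rec_argv[j]);
        free(rec_argv);
        return 0;
}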