1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * CTF writing support via babeltrace. 4 * 5 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com> 6 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de> 7 */ 8 9 #include <errno.h> 10 #include <inttypes.h> 11 #include <linux/compiler.h> 12 #include <linux/kernel.h> 13 #include <linux/zalloc.h> 14 #include <babeltrace/ctf-writer/writer.h> 15 #include <babeltrace/ctf-writer/clock.h> 16 #include <babeltrace/ctf-writer/stream.h> 17 #include <babeltrace/ctf-writer/event.h> 18 #include <babeltrace/ctf-writer/event-types.h> 19 #include <babeltrace/ctf-writer/event-fields.h> 20 #include <babeltrace/ctf-ir/utils.h> 21 #include <babeltrace/ctf/events.h> 22 #include "asm/bug.h" 23 #include "data-convert.h" 24 #include "session.h" 25 #include "debug.h" 26 #include "tool.h" 27 #include "evlist.h" 28 #include "evsel.h" 29 #include "machine.h" 30 #include "config.h" 31 #include <linux/ctype.h> 32 #include <linux/err.h> 33 #include <linux/time64.h> 34 #include "util.h" 35 #include "clockid.h" 36 #include "util/sample.h" 37 38 #ifdef HAVE_LIBTRACEEVENT 39 #include <traceevent/event-parse.h> 40 #endif 41 42 #define pr_N(n, fmt, ...) \ 43 eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__) 44 45 #define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__) 46 #define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__) 47 48 #define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__) 49 50 struct evsel_priv { 51 struct bt_ctf_event_class *event_class; 52 }; 53 54 #define MAX_CPUS 4096 55 56 struct ctf_stream { 57 struct bt_ctf_stream *stream; 58 int cpu; 59 u32 count; 60 }; 61 62 struct ctf_writer { 63 /* writer primitives */ 64 struct bt_ctf_writer *writer; 65 struct ctf_stream **stream; 66 int stream_cnt; 67 struct bt_ctf_stream_class *stream_class; 68 struct bt_ctf_clock *clock; 69 70 /* data types */ 71 union { 72 struct { 73 struct bt_ctf_field_type *s64; 74 struct bt_ctf_field_type *u64; 75 struct bt_ctf_field_type *s32; 76 struct bt_ctf_field_type *u32; 77 struct bt_ctf_field_type *string; 78 struct bt_ctf_field_type *u32_hex; 79 struct bt_ctf_field_type *u64_hex; 80 }; 81 struct bt_ctf_field_type *array[6]; 82 } data; 83 struct bt_ctf_event_class *comm_class; 84 struct bt_ctf_event_class *exit_class; 85 struct bt_ctf_event_class *fork_class; 86 struct bt_ctf_event_class *mmap_class; 87 struct bt_ctf_event_class *mmap2_class; 88 }; 89 90 struct convert { 91 struct perf_tool tool; 92 struct ctf_writer writer; 93 94 u64 events_size; 95 u64 events_count; 96 u64 non_sample_count; 97 98 /* Ordered events configured queue size. */ 99 u64 queue_size; 100 }; 101 102 static int value_set(struct bt_ctf_field_type *type, 103 struct bt_ctf_event *event, 104 const char *name, u64 val) 105 { 106 struct bt_ctf_field *field; 107 bool sign = bt_ctf_field_type_integer_get_signed(type); 108 int ret; 109 110 field = bt_ctf_field_create(type); 111 if (!field) { 112 pr_err("failed to create a field %s\n", name); 113 return -1; 114 } 115 116 if (sign) { 117 ret = bt_ctf_field_signed_integer_set_value(field, val); 118 if (ret) { 119 pr_err("failed to set field value %s\n", name); 120 goto err; 121 } 122 } else { 123 ret = bt_ctf_field_unsigned_integer_set_value(field, val); 124 if (ret) { 125 pr_err("failed to set field value %s\n", name); 126 goto err; 127 } 128 } 129 130 ret = bt_ctf_event_set_payload(event, name, field); 131 if (ret) { 132 pr_err("failed to set payload %s\n", name); 133 goto err; 134 } 135 136 pr2(" SET [%s = %" PRIu64 "]\n", name, val); 137 138 err: 139 bt_ctf_field_put(field); 140 return ret; 141 } 142 143 #define __FUNC_VALUE_SET(_name, _val_type) \ 144 static __maybe_unused int value_set_##_name(struct ctf_writer *cw, \ 145 struct bt_ctf_event *event, \ 146 const char *name, \ 147 _val_type val) \ 148 { \ 149 struct bt_ctf_field_type *type = cw->data._name; \ 150 return value_set(type, event, name, (u64) val); \ 151 } 152 153 #define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name) 154 155 FUNC_VALUE_SET(s32) 156 FUNC_VALUE_SET(u32) 157 FUNC_VALUE_SET(s64) 158 FUNC_VALUE_SET(u64) 159 __FUNC_VALUE_SET(u64_hex, u64) 160 161 static int string_set_value(struct bt_ctf_field *field, const char *string); 162 static __maybe_unused int 163 value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event, 164 const char *name, const char *string) 165 { 166 struct bt_ctf_field_type *type = cw->data.string; 167 struct bt_ctf_field *field; 168 int ret = 0; 169 170 field = bt_ctf_field_create(type); 171 if (!field) { 172 pr_err("failed to create a field %s\n", name); 173 return -1; 174 } 175 176 ret = string_set_value(field, string); 177 if (ret) { 178 pr_err("failed to set value %s\n", name); 179 goto err_put_field; 180 } 181 182 ret = bt_ctf_event_set_payload(event, name, field); 183 if (ret) 184 pr_err("failed to set payload %s\n", name); 185 186 err_put_field: 187 bt_ctf_field_put(field); 188 return ret; 189 } 190 191 static struct bt_ctf_field_type* 192 get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field) 193 { 194 unsigned long flags = field->flags; 195 196 if (flags & TEP_FIELD_IS_STRING) 197 return cw->data.string; 198 199 if (!(flags & TEP_FIELD_IS_SIGNED)) { 200 /* unsigned long are mostly pointers */ 201 if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER) 202 return cw->data.u64_hex; 203 } 204 205 if (flags & TEP_FIELD_IS_SIGNED) { 206 if (field->size == 8) 207 return cw->data.s64; 208 else 209 return cw->data.s32; 210 } 211 212 if (field->size == 8) 213 return cw->data.u64; 214 else 215 return cw->data.u32; 216 } 217 218 static unsigned long long adjust_signedness(unsigned long long value_int, int size) 219 { 220 unsigned long long value_mask; 221 222 /* 223 * value_mask = (1 << (size * 8 - 1)) - 1. 224 * Directly set value_mask for code readers. 225 */ 226 switch (size) { 227 case 1: 228 value_mask = 0x7fULL; 229 break; 230 case 2: 231 value_mask = 0x7fffULL; 232 break; 233 case 4: 234 value_mask = 0x7fffffffULL; 235 break; 236 case 8: 237 /* 238 * For 64 bit value, return it self. There is no need 239 * to fill high bit. 240 */ 241 /* Fall through */ 242 default: 243 /* BUG! */ 244 return value_int; 245 } 246 247 /* If it is a positive value, don't adjust. */ 248 if ((value_int & (~0ULL - value_mask)) == 0) 249 return value_int; 250 251 /* Fill upper part of value_int with 1 to make it a negative long long. */ 252 return (value_int & value_mask) | ~value_mask; 253 } 254 255 static int string_set_value(struct bt_ctf_field *field, const char *string) 256 { 257 char *buffer = NULL; 258 size_t len = strlen(string), i, p; 259 int err; 260 261 for (i = p = 0; i < len; i++, p++) { 262 if (isprint(string[i])) { 263 if (!buffer) 264 continue; 265 buffer[p] = string[i]; 266 } else { 267 char numstr[5]; 268 269 snprintf(numstr, sizeof(numstr), "\\x%02x", 270 (unsigned int)(string[i]) & 0xff); 271 272 if (!buffer) { 273 buffer = zalloc(i + (len - i) * 4 + 2); 274 if (!buffer) { 275 pr_err("failed to set unprintable string '%s'\n", string); 276 return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING"); 277 } 278 if (i > 0) 279 strncpy(buffer, string, i); 280 } 281 memcpy(buffer + p, numstr, 4); 282 p += 3; 283 } 284 } 285 286 if (!buffer) 287 return bt_ctf_field_string_set_value(field, string); 288 err = bt_ctf_field_string_set_value(field, buffer); 289 free(buffer); 290 return err; 291 } 292 293 static int add_tracepoint_field_value(struct ctf_writer *cw, 294 struct bt_ctf_event_class *event_class, 295 struct bt_ctf_event *event, 296 struct perf_sample *sample, 297 struct tep_format_field *fmtf) 298 { 299 struct bt_ctf_field_type *type; 300 struct bt_ctf_field *array_field; 301 struct bt_ctf_field *field; 302 const char *name = fmtf->name; 303 void *data = sample->raw_data; 304 unsigned long flags = fmtf->flags; 305 unsigned int n_items; 306 unsigned int i; 307 unsigned int offset; 308 unsigned int len; 309 int ret; 310 311 name = fmtf->alias; 312 offset = fmtf->offset; 313 len = fmtf->size; 314 if (flags & TEP_FIELD_IS_STRING) 315 flags &= ~TEP_FIELD_IS_ARRAY; 316 317 if (flags & TEP_FIELD_IS_DYNAMIC) { 318 unsigned long long tmp_val; 319 320 tmp_val = tep_read_number(fmtf->event->tep, 321 data + offset, len); 322 offset = tmp_val; 323 len = offset >> 16; 324 offset &= 0xffff; 325 if (tep_field_is_relative(flags)) 326 offset += fmtf->offset + fmtf->size; 327 } 328 329 if (flags & TEP_FIELD_IS_ARRAY) { 330 331 type = bt_ctf_event_class_get_field_by_name( 332 event_class, name); 333 array_field = bt_ctf_field_create(type); 334 bt_ctf_field_type_put(type); 335 if (!array_field) { 336 pr_err("Failed to create array type %s\n", name); 337 return -1; 338 } 339 340 len = fmtf->size / fmtf->arraylen; 341 n_items = fmtf->arraylen; 342 } else { 343 n_items = 1; 344 array_field = NULL; 345 } 346 347 type = get_tracepoint_field_type(cw, fmtf); 348 349 for (i = 0; i < n_items; i++) { 350 if (flags & TEP_FIELD_IS_ARRAY) 351 field = bt_ctf_field_array_get_field(array_field, i); 352 else 353 field = bt_ctf_field_create(type); 354 355 if (!field) { 356 pr_err("failed to create a field %s\n", name); 357 return -1; 358 } 359 360 if (flags & TEP_FIELD_IS_STRING) 361 ret = string_set_value(field, data + offset + i * len); 362 else { 363 unsigned long long value_int; 364 365 value_int = tep_read_number( 366 fmtf->event->tep, 367 data + offset + i * len, len); 368 369 if (!(flags & TEP_FIELD_IS_SIGNED)) 370 ret = bt_ctf_field_unsigned_integer_set_value( 371 field, value_int); 372 else 373 ret = bt_ctf_field_signed_integer_set_value( 374 field, adjust_signedness(value_int, len)); 375 } 376 377 if (ret) { 378 pr_err("failed to set file value %s\n", name); 379 goto err_put_field; 380 } 381 if (!(flags & TEP_FIELD_IS_ARRAY)) { 382 ret = bt_ctf_event_set_payload(event, name, field); 383 if (ret) { 384 pr_err("failed to set payload %s\n", name); 385 goto err_put_field; 386 } 387 } 388 bt_ctf_field_put(field); 389 } 390 if (flags & TEP_FIELD_IS_ARRAY) { 391 ret = bt_ctf_event_set_payload(event, name, array_field); 392 if (ret) { 393 pr_err("Failed add payload array %s\n", name); 394 return -1; 395 } 396 bt_ctf_field_put(array_field); 397 } 398 return 0; 399 400 err_put_field: 401 bt_ctf_field_put(field); 402 return -1; 403 } 404 405 static int add_tracepoint_fields_values(struct ctf_writer *cw, 406 struct bt_ctf_event_class *event_class, 407 struct bt_ctf_event *event, 408 struct tep_format_field *fields, 409 struct perf_sample *sample) 410 { 411 struct tep_format_field *field; 412 int ret; 413 414 for (field = fields; field; field = field->next) { 415 ret = add_tracepoint_field_value(cw, event_class, event, sample, 416 field); 417 if (ret) 418 return -1; 419 } 420 return 0; 421 } 422 423 static int add_tracepoint_values(struct ctf_writer *cw, 424 struct bt_ctf_event_class *event_class, 425 struct bt_ctf_event *event, 426 struct evsel *evsel, 427 struct perf_sample *sample) 428 { 429 struct tep_format_field *common_fields = evsel->tp_format->format.common_fields; 430 struct tep_format_field *fields = evsel->tp_format->format.fields; 431 int ret; 432 433 ret = add_tracepoint_fields_values(cw, event_class, event, 434 common_fields, sample); 435 if (!ret) 436 ret = add_tracepoint_fields_values(cw, event_class, event, 437 fields, sample); 438 439 return ret; 440 } 441 442 static int 443 add_bpf_output_values(struct bt_ctf_event_class *event_class, 444 struct bt_ctf_event *event, 445 struct perf_sample *sample) 446 { 447 struct bt_ctf_field_type *len_type, *seq_type; 448 struct bt_ctf_field *len_field, *seq_field; 449 unsigned int raw_size = sample->raw_size; 450 unsigned int nr_elements = raw_size / sizeof(u32); 451 unsigned int i; 452 int ret; 453 454 if (nr_elements * sizeof(u32) != raw_size) 455 pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n", 456 raw_size, nr_elements * sizeof(u32) - raw_size); 457 458 len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len"); 459 len_field = bt_ctf_field_create(len_type); 460 if (!len_field) { 461 pr_err("failed to create 'raw_len' for bpf output event\n"); 462 ret = -1; 463 goto put_len_type; 464 } 465 466 ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements); 467 if (ret) { 468 pr_err("failed to set field value for raw_len\n"); 469 goto put_len_field; 470 } 471 ret = bt_ctf_event_set_payload(event, "raw_len", len_field); 472 if (ret) { 473 pr_err("failed to set payload to raw_len\n"); 474 goto put_len_field; 475 } 476 477 seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data"); 478 seq_field = bt_ctf_field_create(seq_type); 479 if (!seq_field) { 480 pr_err("failed to create 'raw_data' for bpf output event\n"); 481 ret = -1; 482 goto put_seq_type; 483 } 484 485 ret = bt_ctf_field_sequence_set_length(seq_field, len_field); 486 if (ret) { 487 pr_err("failed to set length of 'raw_data'\n"); 488 goto put_seq_field; 489 } 490 491 for (i = 0; i < nr_elements; i++) { 492 struct bt_ctf_field *elem_field = 493 bt_ctf_field_sequence_get_field(seq_field, i); 494 495 ret = bt_ctf_field_unsigned_integer_set_value(elem_field, 496 ((u32 *)(sample->raw_data))[i]); 497 498 bt_ctf_field_put(elem_field); 499 if (ret) { 500 pr_err("failed to set raw_data[%d]\n", i); 501 goto put_seq_field; 502 } 503 } 504 505 ret = bt_ctf_event_set_payload(event, "raw_data", seq_field); 506 if (ret) 507 pr_err("failed to set payload for raw_data\n"); 508 509 put_seq_field: 510 bt_ctf_field_put(seq_field); 511 put_seq_type: 512 bt_ctf_field_type_put(seq_type); 513 put_len_field: 514 bt_ctf_field_put(len_field); 515 put_len_type: 516 bt_ctf_field_type_put(len_type); 517 return ret; 518 } 519 520 static int 521 add_callchain_output_values(struct bt_ctf_event_class *event_class, 522 struct bt_ctf_event *event, 523 struct ip_callchain *callchain) 524 { 525 struct bt_ctf_field_type *len_type, *seq_type; 526 struct bt_ctf_field *len_field, *seq_field; 527 unsigned int nr_elements = callchain->nr; 528 unsigned int i; 529 int ret; 530 531 len_type = bt_ctf_event_class_get_field_by_name( 532 event_class, "perf_callchain_size"); 533 len_field = bt_ctf_field_create(len_type); 534 if (!len_field) { 535 pr_err("failed to create 'perf_callchain_size' for callchain output event\n"); 536 ret = -1; 537 goto put_len_type; 538 } 539 540 ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements); 541 if (ret) { 542 pr_err("failed to set field value for perf_callchain_size\n"); 543 goto put_len_field; 544 } 545 ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field); 546 if (ret) { 547 pr_err("failed to set payload to perf_callchain_size\n"); 548 goto put_len_field; 549 } 550 551 seq_type = bt_ctf_event_class_get_field_by_name( 552 event_class, "perf_callchain"); 553 seq_field = bt_ctf_field_create(seq_type); 554 if (!seq_field) { 555 pr_err("failed to create 'perf_callchain' for callchain output event\n"); 556 ret = -1; 557 goto put_seq_type; 558 } 559 560 ret = bt_ctf_field_sequence_set_length(seq_field, len_field); 561 if (ret) { 562 pr_err("failed to set length of 'perf_callchain'\n"); 563 goto put_seq_field; 564 } 565 566 for (i = 0; i < nr_elements; i++) { 567 struct bt_ctf_field *elem_field = 568 bt_ctf_field_sequence_get_field(seq_field, i); 569 570 ret = bt_ctf_field_unsigned_integer_set_value(elem_field, 571 ((u64 *)(callchain->ips))[i]); 572 573 bt_ctf_field_put(elem_field); 574 if (ret) { 575 pr_err("failed to set callchain[%d]\n", i); 576 goto put_seq_field; 577 } 578 } 579 580 ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field); 581 if (ret) 582 pr_err("failed to set payload for raw_data\n"); 583 584 put_seq_field: 585 bt_ctf_field_put(seq_field); 586 put_seq_type: 587 bt_ctf_field_type_put(seq_type); 588 put_len_field: 589 bt_ctf_field_put(len_field); 590 put_len_type: 591 bt_ctf_field_type_put(len_type); 592 return ret; 593 } 594 595 static int add_generic_values(struct ctf_writer *cw, 596 struct bt_ctf_event *event, 597 struct evsel *evsel, 598 struct perf_sample *sample) 599 { 600 u64 type = evsel->core.attr.sample_type; 601 int ret; 602 603 /* 604 * missing: 605 * PERF_SAMPLE_TIME - not needed as we have it in 606 * ctf event header 607 * PERF_SAMPLE_READ - TODO 608 * PERF_SAMPLE_RAW - tracepoint fields are handled separately 609 * PERF_SAMPLE_BRANCH_STACK - TODO 610 * PERF_SAMPLE_REGS_USER - TODO 611 * PERF_SAMPLE_STACK_USER - TODO 612 */ 613 614 if (type & PERF_SAMPLE_IP) { 615 ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip); 616 if (ret) 617 return -1; 618 } 619 620 if (type & PERF_SAMPLE_TID) { 621 ret = value_set_s32(cw, event, "perf_tid", sample->tid); 622 if (ret) 623 return -1; 624 625 ret = value_set_s32(cw, event, "perf_pid", sample->pid); 626 if (ret) 627 return -1; 628 } 629 630 if ((type & PERF_SAMPLE_ID) || 631 (type & PERF_SAMPLE_IDENTIFIER)) { 632 ret = value_set_u64(cw, event, "perf_id", sample->id); 633 if (ret) 634 return -1; 635 } 636 637 if (type & PERF_SAMPLE_STREAM_ID) { 638 ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id); 639 if (ret) 640 return -1; 641 } 642 643 if (type & PERF_SAMPLE_PERIOD) { 644 ret = value_set_u64(cw, event, "perf_period", sample->period); 645 if (ret) 646 return -1; 647 } 648 649 if (type & PERF_SAMPLE_WEIGHT) { 650 ret = value_set_u64(cw, event, "perf_weight", sample->weight); 651 if (ret) 652 return -1; 653 } 654 655 if (type & PERF_SAMPLE_DATA_SRC) { 656 ret = value_set_u64(cw, event, "perf_data_src", 657 sample->data_src); 658 if (ret) 659 return -1; 660 } 661 662 if (type & PERF_SAMPLE_TRANSACTION) { 663 ret = value_set_u64(cw, event, "perf_transaction", 664 sample->transaction); 665 if (ret) 666 return -1; 667 } 668 669 return 0; 670 } 671 672 static int ctf_stream__flush(struct ctf_stream *cs) 673 { 674 int err = 0; 675 676 if (cs) { 677 err = bt_ctf_stream_flush(cs->stream); 678 if (err) 679 pr_err("CTF stream %d flush failed\n", cs->cpu); 680 681 pr("Flush stream for cpu %d (%u samples)\n", 682 cs->cpu, cs->count); 683 684 cs->count = 0; 685 } 686 687 return err; 688 } 689 690 static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu) 691 { 692 struct ctf_stream *cs; 693 struct bt_ctf_field *pkt_ctx = NULL; 694 struct bt_ctf_field *cpu_field = NULL; 695 struct bt_ctf_stream *stream = NULL; 696 int ret; 697 698 cs = zalloc(sizeof(*cs)); 699 if (!cs) { 700 pr_err("Failed to allocate ctf stream\n"); 701 return NULL; 702 } 703 704 stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class); 705 if (!stream) { 706 pr_err("Failed to create CTF stream\n"); 707 goto out; 708 } 709 710 pkt_ctx = bt_ctf_stream_get_packet_context(stream); 711 if (!pkt_ctx) { 712 pr_err("Failed to obtain packet context\n"); 713 goto out; 714 } 715 716 cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id"); 717 bt_ctf_field_put(pkt_ctx); 718 if (!cpu_field) { 719 pr_err("Failed to obtain cpu field\n"); 720 goto out; 721 } 722 723 ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu); 724 if (ret) { 725 pr_err("Failed to update CPU number\n"); 726 goto out; 727 } 728 729 bt_ctf_field_put(cpu_field); 730 731 cs->cpu = cpu; 732 cs->stream = stream; 733 return cs; 734 735 out: 736 if (cpu_field) 737 bt_ctf_field_put(cpu_field); 738 if (stream) 739 bt_ctf_stream_put(stream); 740 741 free(cs); 742 return NULL; 743 } 744 745 static void ctf_stream__delete(struct ctf_stream *cs) 746 { 747 if (cs) { 748 bt_ctf_stream_put(cs->stream); 749 free(cs); 750 } 751 } 752 753 static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu) 754 { 755 struct ctf_stream *cs = cw->stream[cpu]; 756 757 if (!cs) { 758 cs = ctf_stream__create(cw, cpu); 759 cw->stream[cpu] = cs; 760 } 761 762 return cs; 763 } 764 765 static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample, 766 struct evsel *evsel) 767 { 768 int cpu = 0; 769 770 if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU) 771 cpu = sample->cpu; 772 773 if (cpu > cw->stream_cnt) { 774 pr_err("Event was recorded for CPU %d, limit is at %d.\n", 775 cpu, cw->stream_cnt); 776 cpu = 0; 777 } 778 779 return cpu; 780 } 781 782 #define STREAM_FLUSH_COUNT 100000 783 784 /* 785 * Currently we have no other way to determine the 786 * time for the stream flush other than keep track 787 * of the number of events and check it against 788 * threshold. 789 */ 790 static bool is_flush_needed(struct ctf_stream *cs) 791 { 792 return cs->count >= STREAM_FLUSH_COUNT; 793 } 794 795 static int process_sample_event(struct perf_tool *tool, 796 union perf_event *_event, 797 struct perf_sample *sample, 798 struct evsel *evsel, 799 struct machine *machine __maybe_unused) 800 { 801 struct convert *c = container_of(tool, struct convert, tool); 802 struct evsel_priv *priv = evsel->priv; 803 struct ctf_writer *cw = &c->writer; 804 struct ctf_stream *cs; 805 struct bt_ctf_event_class *event_class; 806 struct bt_ctf_event *event; 807 int ret; 808 unsigned long type = evsel->core.attr.sample_type; 809 810 if (WARN_ONCE(!priv, "Failed to setup all events.\n")) 811 return 0; 812 813 event_class = priv->event_class; 814 815 /* update stats */ 816 c->events_count++; 817 c->events_size += _event->header.size; 818 819 pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count); 820 821 event = bt_ctf_event_create(event_class); 822 if (!event) { 823 pr_err("Failed to create an CTF event\n"); 824 return -1; 825 } 826 827 bt_ctf_clock_set_time(cw->clock, sample->time); 828 829 ret = add_generic_values(cw, event, evsel, sample); 830 if (ret) 831 return -1; 832 833 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { 834 ret = add_tracepoint_values(cw, event_class, event, 835 evsel, sample); 836 if (ret) 837 return -1; 838 } 839 840 if (type & PERF_SAMPLE_CALLCHAIN) { 841 ret = add_callchain_output_values(event_class, 842 event, sample->callchain); 843 if (ret) 844 return -1; 845 } 846 847 if (evsel__is_bpf_output(evsel)) { 848 ret = add_bpf_output_values(event_class, event, sample); 849 if (ret) 850 return -1; 851 } 852 853 cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel)); 854 if (cs) { 855 if (is_flush_needed(cs)) 856 ctf_stream__flush(cs); 857 858 cs->count++; 859 bt_ctf_stream_append_event(cs->stream, event); 860 } 861 862 bt_ctf_event_put(event); 863 return cs ? 0 : -1; 864 } 865 866 #define __NON_SAMPLE_SET_FIELD(_name, _type, _field) \ 867 do { \ 868 ret = value_set_##_type(cw, event, #_field, _event->_name._field);\ 869 if (ret) \ 870 return -1; \ 871 } while(0) 872 873 #define __FUNC_PROCESS_NON_SAMPLE(_name, body) \ 874 static int process_##_name##_event(struct perf_tool *tool, \ 875 union perf_event *_event, \ 876 struct perf_sample *sample, \ 877 struct machine *machine) \ 878 { \ 879 struct convert *c = container_of(tool, struct convert, tool);\ 880 struct ctf_writer *cw = &c->writer; \ 881 struct bt_ctf_event_class *event_class = cw->_name##_class;\ 882 struct bt_ctf_event *event; \ 883 struct ctf_stream *cs; \ 884 int ret; \ 885 \ 886 c->non_sample_count++; \ 887 c->events_size += _event->header.size; \ 888 event = bt_ctf_event_create(event_class); \ 889 if (!event) { \ 890 pr_err("Failed to create an CTF event\n"); \ 891 return -1; \ 892 } \ 893 \ 894 bt_ctf_clock_set_time(cw->clock, sample->time); \ 895 body \ 896 cs = ctf_stream(cw, 0); \ 897 if (cs) { \ 898 if (is_flush_needed(cs)) \ 899 ctf_stream__flush(cs); \ 900 \ 901 cs->count++; \ 902 bt_ctf_stream_append_event(cs->stream, event); \ 903 } \ 904 bt_ctf_event_put(event); \ 905 \ 906 return perf_event__process_##_name(tool, _event, sample, machine);\ 907 } 908 909 __FUNC_PROCESS_NON_SAMPLE(comm, 910 __NON_SAMPLE_SET_FIELD(comm, u32, pid); 911 __NON_SAMPLE_SET_FIELD(comm, u32, tid); 912 __NON_SAMPLE_SET_FIELD(comm, string, comm); 913 ) 914 __FUNC_PROCESS_NON_SAMPLE(fork, 915 __NON_SAMPLE_SET_FIELD(fork, u32, pid); 916 __NON_SAMPLE_SET_FIELD(fork, u32, ppid); 917 __NON_SAMPLE_SET_FIELD(fork, u32, tid); 918 __NON_SAMPLE_SET_FIELD(fork, u32, ptid); 919 __NON_SAMPLE_SET_FIELD(fork, u64, time); 920 ) 921 922 __FUNC_PROCESS_NON_SAMPLE(exit, 923 __NON_SAMPLE_SET_FIELD(fork, u32, pid); 924 __NON_SAMPLE_SET_FIELD(fork, u32, ppid); 925 __NON_SAMPLE_SET_FIELD(fork, u32, tid); 926 __NON_SAMPLE_SET_FIELD(fork, u32, ptid); 927 __NON_SAMPLE_SET_FIELD(fork, u64, time); 928 ) 929 __FUNC_PROCESS_NON_SAMPLE(mmap, 930 __NON_SAMPLE_SET_FIELD(mmap, u32, pid); 931 __NON_SAMPLE_SET_FIELD(mmap, u32, tid); 932 __NON_SAMPLE_SET_FIELD(mmap, u64_hex, start); 933 __NON_SAMPLE_SET_FIELD(mmap, string, filename); 934 ) 935 __FUNC_PROCESS_NON_SAMPLE(mmap2, 936 __NON_SAMPLE_SET_FIELD(mmap2, u32, pid); 937 __NON_SAMPLE_SET_FIELD(mmap2, u32, tid); 938 __NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start); 939 __NON_SAMPLE_SET_FIELD(mmap2, string, filename); 940 ) 941 #undef __NON_SAMPLE_SET_FIELD 942 #undef __FUNC_PROCESS_NON_SAMPLE 943 944 /* If dup < 0, add a prefix. Else, add _dupl_X suffix. */ 945 static char *change_name(char *name, char *orig_name, int dup) 946 { 947 char *new_name = NULL; 948 size_t len; 949 950 if (!name) 951 name = orig_name; 952 953 if (dup >= 10) 954 goto out; 955 /* 956 * Add '_' prefix to potential keywork. According to 957 * Mathieu Desnoyers (https://lore.kernel.org/lkml/1074266107.40857.1422045946295.JavaMail.zimbra@efficios.com), 958 * further CTF spec updating may require us to use '$'. 959 */ 960 if (dup < 0) 961 len = strlen(name) + sizeof("_"); 962 else 963 len = strlen(orig_name) + sizeof("_dupl_X"); 964 965 new_name = malloc(len); 966 if (!new_name) 967 goto out; 968 969 if (dup < 0) 970 snprintf(new_name, len, "_%s", name); 971 else 972 snprintf(new_name, len, "%s_dupl_%d", orig_name, dup); 973 974 out: 975 if (name != orig_name) 976 free(name); 977 return new_name; 978 } 979 980 static int event_class_add_field(struct bt_ctf_event_class *event_class, 981 struct bt_ctf_field_type *type, 982 struct tep_format_field *field) 983 { 984 struct bt_ctf_field_type *t = NULL; 985 char *name; 986 int dup = 1; 987 int ret; 988 989 /* alias was already assigned */ 990 if (field->alias != field->name) 991 return bt_ctf_event_class_add_field(event_class, type, 992 (char *)field->alias); 993 994 name = field->name; 995 996 /* If 'name' is a keywork, add prefix. */ 997 if (bt_ctf_validate_identifier(name)) 998 name = change_name(name, field->name, -1); 999 1000 if (!name) { 1001 pr_err("Failed to fix invalid identifier."); 1002 return -1; 1003 } 1004 while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) { 1005 bt_ctf_field_type_put(t); 1006 name = change_name(name, field->name, dup++); 1007 if (!name) { 1008 pr_err("Failed to create dup name for '%s'\n", field->name); 1009 return -1; 1010 } 1011 } 1012 1013 ret = bt_ctf_event_class_add_field(event_class, type, name); 1014 if (!ret) 1015 field->alias = name; 1016 1017 return ret; 1018 } 1019 1020 static int add_tracepoint_fields_types(struct ctf_writer *cw, 1021 struct tep_format_field *fields, 1022 struct bt_ctf_event_class *event_class) 1023 { 1024 struct tep_format_field *field; 1025 int ret; 1026 1027 for (field = fields; field; field = field->next) { 1028 struct bt_ctf_field_type *type; 1029 unsigned long flags = field->flags; 1030 1031 pr2(" field '%s'\n", field->name); 1032 1033 type = get_tracepoint_field_type(cw, field); 1034 if (!type) 1035 return -1; 1036 1037 /* 1038 * A string is an array of chars. For this we use the string 1039 * type and don't care that it is an array. What we don't 1040 * support is an array of strings. 1041 */ 1042 if (flags & TEP_FIELD_IS_STRING) 1043 flags &= ~TEP_FIELD_IS_ARRAY; 1044 1045 if (flags & TEP_FIELD_IS_ARRAY) 1046 type = bt_ctf_field_type_array_create(type, field->arraylen); 1047 1048 ret = event_class_add_field(event_class, type, field); 1049 1050 if (flags & TEP_FIELD_IS_ARRAY) 1051 bt_ctf_field_type_put(type); 1052 1053 if (ret) { 1054 pr_err("Failed to add field '%s': %d\n", 1055 field->name, ret); 1056 return -1; 1057 } 1058 } 1059 1060 return 0; 1061 } 1062 1063 static int add_tracepoint_types(struct ctf_writer *cw, 1064 struct evsel *evsel, 1065 struct bt_ctf_event_class *class) 1066 { 1067 struct tep_format_field *common_fields = evsel->tp_format->format.common_fields; 1068 struct tep_format_field *fields = evsel->tp_format->format.fields; 1069 int ret; 1070 1071 ret = add_tracepoint_fields_types(cw, common_fields, class); 1072 if (!ret) 1073 ret = add_tracepoint_fields_types(cw, fields, class); 1074 1075 return ret; 1076 } 1077 1078 static int add_bpf_output_types(struct ctf_writer *cw, 1079 struct bt_ctf_event_class *class) 1080 { 1081 struct bt_ctf_field_type *len_type = cw->data.u32; 1082 struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex; 1083 struct bt_ctf_field_type *seq_type; 1084 int ret; 1085 1086 ret = bt_ctf_event_class_add_field(class, len_type, "raw_len"); 1087 if (ret) 1088 return ret; 1089 1090 seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len"); 1091 if (!seq_type) 1092 return -1; 1093 1094 return bt_ctf_event_class_add_field(class, seq_type, "raw_data"); 1095 } 1096 1097 static int add_generic_types(struct ctf_writer *cw, struct evsel *evsel, 1098 struct bt_ctf_event_class *event_class) 1099 { 1100 u64 type = evsel->core.attr.sample_type; 1101 1102 /* 1103 * missing: 1104 * PERF_SAMPLE_TIME - not needed as we have it in 1105 * ctf event header 1106 * PERF_SAMPLE_READ - TODO 1107 * PERF_SAMPLE_CALLCHAIN - TODO 1108 * PERF_SAMPLE_RAW - tracepoint fields and BPF output 1109 * are handled separately 1110 * PERF_SAMPLE_BRANCH_STACK - TODO 1111 * PERF_SAMPLE_REGS_USER - TODO 1112 * PERF_SAMPLE_STACK_USER - TODO 1113 */ 1114 1115 #define ADD_FIELD(cl, t, n) \ 1116 do { \ 1117 pr2(" field '%s'\n", n); \ 1118 if (bt_ctf_event_class_add_field(cl, t, n)) { \ 1119 pr_err("Failed to add field '%s';\n", n); \ 1120 return -1; \ 1121 } \ 1122 } while (0) 1123 1124 if (type & PERF_SAMPLE_IP) 1125 ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip"); 1126 1127 if (type & PERF_SAMPLE_TID) { 1128 ADD_FIELD(event_class, cw->data.s32, "perf_tid"); 1129 ADD_FIELD(event_class, cw->data.s32, "perf_pid"); 1130 } 1131 1132 if ((type & PERF_SAMPLE_ID) || 1133 (type & PERF_SAMPLE_IDENTIFIER)) 1134 ADD_FIELD(event_class, cw->data.u64, "perf_id"); 1135 1136 if (type & PERF_SAMPLE_STREAM_ID) 1137 ADD_FIELD(event_class, cw->data.u64, "perf_stream_id"); 1138 1139 if (type & PERF_SAMPLE_PERIOD) 1140 ADD_FIELD(event_class, cw->data.u64, "perf_period"); 1141 1142 if (type & PERF_SAMPLE_WEIGHT) 1143 ADD_FIELD(event_class, cw->data.u64, "perf_weight"); 1144 1145 if (type & PERF_SAMPLE_DATA_SRC) 1146 ADD_FIELD(event_class, cw->data.u64, "perf_data_src"); 1147 1148 if (type & PERF_SAMPLE_TRANSACTION) 1149 ADD_FIELD(event_class, cw->data.u64, "perf_transaction"); 1150 1151 if (type & PERF_SAMPLE_CALLCHAIN) { 1152 ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size"); 1153 ADD_FIELD(event_class, 1154 bt_ctf_field_type_sequence_create( 1155 cw->data.u64_hex, "perf_callchain_size"), 1156 "perf_callchain"); 1157 } 1158 1159 #undef ADD_FIELD 1160 return 0; 1161 } 1162 1163 static int add_event(struct ctf_writer *cw, struct evsel *evsel) 1164 { 1165 struct bt_ctf_event_class *event_class; 1166 struct evsel_priv *priv; 1167 const char *name = evsel__name(evsel); 1168 int ret; 1169 1170 pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type); 1171 1172 event_class = bt_ctf_event_class_create(name); 1173 if (!event_class) 1174 return -1; 1175 1176 ret = add_generic_types(cw, evsel, event_class); 1177 if (ret) 1178 goto err; 1179 1180 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { 1181 ret = add_tracepoint_types(cw, evsel, event_class); 1182 if (ret) 1183 goto err; 1184 } 1185 1186 if (evsel__is_bpf_output(evsel)) { 1187 ret = add_bpf_output_types(cw, event_class); 1188 if (ret) 1189 goto err; 1190 } 1191 1192 ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class); 1193 if (ret) { 1194 pr("Failed to add event class into stream.\n"); 1195 goto err; 1196 } 1197 1198 priv = malloc(sizeof(*priv)); 1199 if (!priv) 1200 goto err; 1201 1202 priv->event_class = event_class; 1203 evsel->priv = priv; 1204 return 0; 1205 1206 err: 1207 bt_ctf_event_class_put(event_class); 1208 pr_err("Failed to add event '%s'.\n", name); 1209 return -1; 1210 } 1211 1212 static int setup_events(struct ctf_writer *cw, struct perf_session *session) 1213 { 1214 struct evlist *evlist = session->evlist; 1215 struct evsel *evsel; 1216 int ret; 1217 1218 evlist__for_each_entry(evlist, evsel) { 1219 ret = add_event(cw, evsel); 1220 if (ret) 1221 return ret; 1222 } 1223 return 0; 1224 } 1225 1226 #define __NON_SAMPLE_ADD_FIELD(t, n) \ 1227 do { \ 1228 pr2(" field '%s'\n", #n); \ 1229 if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\ 1230 pr_err("Failed to add field '%s';\n", #n);\ 1231 return -1; \ 1232 } \ 1233 } while(0) 1234 1235 #define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) \ 1236 static int add_##_name##_event(struct ctf_writer *cw) \ 1237 { \ 1238 struct bt_ctf_event_class *event_class; \ 1239 int ret; \ 1240 \ 1241 pr("Adding "#_name" event\n"); \ 1242 event_class = bt_ctf_event_class_create("perf_" #_name);\ 1243 if (!event_class) \ 1244 return -1; \ 1245 body \ 1246 \ 1247 ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\ 1248 if (ret) { \ 1249 pr("Failed to add event class '"#_name"' into stream.\n");\ 1250 return ret; \ 1251 } \ 1252 \ 1253 cw->_name##_class = event_class; \ 1254 bt_ctf_event_class_put(event_class); \ 1255 return 0; \ 1256 } 1257 1258 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm, 1259 __NON_SAMPLE_ADD_FIELD(u32, pid); 1260 __NON_SAMPLE_ADD_FIELD(u32, tid); 1261 __NON_SAMPLE_ADD_FIELD(string, comm); 1262 ) 1263 1264 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork, 1265 __NON_SAMPLE_ADD_FIELD(u32, pid); 1266 __NON_SAMPLE_ADD_FIELD(u32, ppid); 1267 __NON_SAMPLE_ADD_FIELD(u32, tid); 1268 __NON_SAMPLE_ADD_FIELD(u32, ptid); 1269 __NON_SAMPLE_ADD_FIELD(u64, time); 1270 ) 1271 1272 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit, 1273 __NON_SAMPLE_ADD_FIELD(u32, pid); 1274 __NON_SAMPLE_ADD_FIELD(u32, ppid); 1275 __NON_SAMPLE_ADD_FIELD(u32, tid); 1276 __NON_SAMPLE_ADD_FIELD(u32, ptid); 1277 __NON_SAMPLE_ADD_FIELD(u64, time); 1278 ) 1279 1280 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap, 1281 __NON_SAMPLE_ADD_FIELD(u32, pid); 1282 __NON_SAMPLE_ADD_FIELD(u32, tid); 1283 __NON_SAMPLE_ADD_FIELD(u64_hex, start); 1284 __NON_SAMPLE_ADD_FIELD(string, filename); 1285 ) 1286 1287 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2, 1288 __NON_SAMPLE_ADD_FIELD(u32, pid); 1289 __NON_SAMPLE_ADD_FIELD(u32, tid); 1290 __NON_SAMPLE_ADD_FIELD(u64_hex, start); 1291 __NON_SAMPLE_ADD_FIELD(string, filename); 1292 ) 1293 #undef __NON_SAMPLE_ADD_FIELD 1294 #undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS 1295 1296 static int setup_non_sample_events(struct ctf_writer *cw, 1297 struct perf_session *session __maybe_unused) 1298 { 1299 int ret; 1300 1301 ret = add_comm_event(cw); 1302 if (ret) 1303 return ret; 1304 ret = add_exit_event(cw); 1305 if (ret) 1306 return ret; 1307 ret = add_fork_event(cw); 1308 if (ret) 1309 return ret; 1310 ret = add_mmap_event(cw); 1311 if (ret) 1312 return ret; 1313 ret = add_mmap2_event(cw); 1314 if (ret) 1315 return ret; 1316 return 0; 1317 } 1318 1319 static void cleanup_events(struct perf_session *session) 1320 { 1321 struct evlist *evlist = session->evlist; 1322 struct evsel *evsel; 1323 1324 evlist__for_each_entry(evlist, evsel) { 1325 struct evsel_priv *priv; 1326 1327 priv = evsel->priv; 1328 bt_ctf_event_class_put(priv->event_class); 1329 zfree(&evsel->priv); 1330 } 1331 1332 evlist__delete(evlist); 1333 session->evlist = NULL; 1334 } 1335 1336 static int setup_streams(struct ctf_writer *cw, struct perf_session *session) 1337 { 1338 struct ctf_stream **stream; 1339 struct perf_header *ph = &session->header; 1340 int ncpus; 1341 1342 /* 1343 * Try to get the number of cpus used in the data file, 1344 * if not present fallback to the MAX_CPUS. 1345 */ 1346 ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS; 1347 1348 stream = zalloc(sizeof(*stream) * ncpus); 1349 if (!stream) { 1350 pr_err("Failed to allocate streams.\n"); 1351 return -ENOMEM; 1352 } 1353 1354 cw->stream = stream; 1355 cw->stream_cnt = ncpus; 1356 return 0; 1357 } 1358 1359 static void free_streams(struct ctf_writer *cw) 1360 { 1361 int cpu; 1362 1363 for (cpu = 0; cpu < cw->stream_cnt; cpu++) 1364 ctf_stream__delete(cw->stream[cpu]); 1365 1366 zfree(&cw->stream); 1367 } 1368 1369 static int ctf_writer__setup_env(struct ctf_writer *cw, 1370 struct perf_session *session) 1371 { 1372 struct perf_header *header = &session->header; 1373 struct bt_ctf_writer *writer = cw->writer; 1374 1375 #define ADD(__n, __v) \ 1376 do { \ 1377 if (bt_ctf_writer_add_environment_field(writer, __n, __v)) \ 1378 return -1; \ 1379 } while (0) 1380 1381 ADD("host", header->env.hostname); 1382 ADD("sysname", "Linux"); 1383 ADD("release", header->env.os_release); 1384 ADD("version", header->env.version); 1385 ADD("machine", header->env.arch); 1386 ADD("domain", "kernel"); 1387 ADD("tracer_name", "perf"); 1388 1389 #undef ADD 1390 return 0; 1391 } 1392 1393 static int ctf_writer__setup_clock(struct ctf_writer *cw, 1394 struct perf_session *session, 1395 bool tod) 1396 { 1397 struct bt_ctf_clock *clock = cw->clock; 1398 const char *desc = "perf clock"; 1399 int64_t offset = 0; 1400 1401 if (tod) { 1402 struct perf_env *env = &session->header.env; 1403 1404 if (!env->clock.enabled) { 1405 pr_err("Can't provide --tod time, missing clock data. " 1406 "Please record with -k/--clockid option.\n"); 1407 return -1; 1408 } 1409 1410 desc = clockid_name(env->clock.clockid); 1411 offset = env->clock.tod_ns - env->clock.clockid_ns; 1412 } 1413 1414 #define SET(__n, __v) \ 1415 do { \ 1416 if (bt_ctf_clock_set_##__n(clock, __v)) \ 1417 return -1; \ 1418 } while (0) 1419 1420 SET(frequency, 1000000000); 1421 SET(offset, offset); 1422 SET(description, desc); 1423 SET(precision, 10); 1424 SET(is_absolute, 0); 1425 1426 #undef SET 1427 return 0; 1428 } 1429 1430 static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex) 1431 { 1432 struct bt_ctf_field_type *type; 1433 1434 type = bt_ctf_field_type_integer_create(size); 1435 if (!type) 1436 return NULL; 1437 1438 if (sign && 1439 bt_ctf_field_type_integer_set_signed(type, 1)) 1440 goto err; 1441 1442 if (hex && 1443 bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL)) 1444 goto err; 1445 1446 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 1447 bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN); 1448 #else 1449 bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN); 1450 #endif 1451 1452 pr2("Created type: INTEGER %d-bit %ssigned %s\n", 1453 size, sign ? "un" : "", hex ? "hex" : ""); 1454 return type; 1455 1456 err: 1457 bt_ctf_field_type_put(type); 1458 return NULL; 1459 } 1460 1461 static void ctf_writer__cleanup_data(struct ctf_writer *cw) 1462 { 1463 unsigned int i; 1464 1465 for (i = 0; i < ARRAY_SIZE(cw->data.array); i++) 1466 bt_ctf_field_type_put(cw->data.array[i]); 1467 } 1468 1469 static int ctf_writer__init_data(struct ctf_writer *cw) 1470 { 1471 #define CREATE_INT_TYPE(type, size, sign, hex) \ 1472 do { \ 1473 (type) = create_int_type(size, sign, hex); \ 1474 if (!(type)) \ 1475 goto err; \ 1476 } while (0) 1477 1478 CREATE_INT_TYPE(cw->data.s64, 64, true, false); 1479 CREATE_INT_TYPE(cw->data.u64, 64, false, false); 1480 CREATE_INT_TYPE(cw->data.s32, 32, true, false); 1481 CREATE_INT_TYPE(cw->data.u32, 32, false, false); 1482 CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true); 1483 CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true); 1484 1485 cw->data.string = bt_ctf_field_type_string_create(); 1486 if (cw->data.string) 1487 return 0; 1488 1489 err: 1490 ctf_writer__cleanup_data(cw); 1491 pr_err("Failed to create data types.\n"); 1492 return -1; 1493 } 1494 1495 static void ctf_writer__cleanup(struct ctf_writer *cw) 1496 { 1497 ctf_writer__cleanup_data(cw); 1498 1499 bt_ctf_clock_put(cw->clock); 1500 free_streams(cw); 1501 bt_ctf_stream_class_put(cw->stream_class); 1502 bt_ctf_writer_put(cw->writer); 1503 1504 /* and NULL all the pointers */ 1505 memset(cw, 0, sizeof(*cw)); 1506 } 1507 1508 static int ctf_writer__init(struct ctf_writer *cw, const char *path, 1509 struct perf_session *session, bool tod) 1510 { 1511 struct bt_ctf_writer *writer; 1512 struct bt_ctf_stream_class *stream_class; 1513 struct bt_ctf_clock *clock; 1514 struct bt_ctf_field_type *pkt_ctx_type; 1515 int ret; 1516 1517 /* CTF writer */ 1518 writer = bt_ctf_writer_create(path); 1519 if (!writer) 1520 goto err; 1521 1522 cw->writer = writer; 1523 1524 /* CTF clock */ 1525 clock = bt_ctf_clock_create("perf_clock"); 1526 if (!clock) { 1527 pr("Failed to create CTF clock.\n"); 1528 goto err_cleanup; 1529 } 1530 1531 cw->clock = clock; 1532 1533 if (ctf_writer__setup_clock(cw, session, tod)) { 1534 pr("Failed to setup CTF clock.\n"); 1535 goto err_cleanup; 1536 } 1537 1538 /* CTF stream class */ 1539 stream_class = bt_ctf_stream_class_create("perf_stream"); 1540 if (!stream_class) { 1541 pr("Failed to create CTF stream class.\n"); 1542 goto err_cleanup; 1543 } 1544 1545 cw->stream_class = stream_class; 1546 1547 /* CTF clock stream setup */ 1548 if (bt_ctf_stream_class_set_clock(stream_class, clock)) { 1549 pr("Failed to assign CTF clock to stream class.\n"); 1550 goto err_cleanup; 1551 } 1552 1553 if (ctf_writer__init_data(cw)) 1554 goto err_cleanup; 1555 1556 /* Add cpu_id for packet context */ 1557 pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class); 1558 if (!pkt_ctx_type) 1559 goto err_cleanup; 1560 1561 ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id"); 1562 bt_ctf_field_type_put(pkt_ctx_type); 1563 if (ret) 1564 goto err_cleanup; 1565 1566 /* CTF clock writer setup */ 1567 if (bt_ctf_writer_add_clock(writer, clock)) { 1568 pr("Failed to assign CTF clock to writer.\n"); 1569 goto err_cleanup; 1570 } 1571 1572 return 0; 1573 1574 err_cleanup: 1575 ctf_writer__cleanup(cw); 1576 err: 1577 pr_err("Failed to setup CTF writer.\n"); 1578 return -1; 1579 } 1580 1581 static int ctf_writer__flush_streams(struct ctf_writer *cw) 1582 { 1583 int cpu, ret = 0; 1584 1585 for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++) 1586 ret = ctf_stream__flush(cw->stream[cpu]); 1587 1588 return ret; 1589 } 1590 1591 static int convert__config(const char *var, const char *value, void *cb) 1592 { 1593 struct convert *c = cb; 1594 1595 if (!strcmp(var, "convert.queue-size")) 1596 return perf_config_u64(&c->queue_size, var, value); 1597 1598 return 0; 1599 } 1600 1601 int bt_convert__perf2ctf(const char *input, const char *path, 1602 struct perf_data_convert_opts *opts) 1603 { 1604 struct perf_session *session; 1605 struct perf_data data = { 1606 .path = input, 1607 .mode = PERF_DATA_MODE_READ, 1608 .force = opts->force, 1609 }; 1610 struct convert c = { 1611 .tool = { 1612 .sample = process_sample_event, 1613 .mmap = perf_event__process_mmap, 1614 .mmap2 = perf_event__process_mmap2, 1615 .comm = perf_event__process_comm, 1616 .exit = perf_event__process_exit, 1617 .fork = perf_event__process_fork, 1618 .lost = perf_event__process_lost, 1619 .tracing_data = perf_event__process_tracing_data, 1620 .build_id = perf_event__process_build_id, 1621 .namespaces = perf_event__process_namespaces, 1622 .ordered_events = true, 1623 .ordering_requires_timestamps = true, 1624 }, 1625 }; 1626 struct ctf_writer *cw = &c.writer; 1627 int err; 1628 1629 if (opts->all) { 1630 c.tool.comm = process_comm_event; 1631 c.tool.exit = process_exit_event; 1632 c.tool.fork = process_fork_event; 1633 c.tool.mmap = process_mmap_event; 1634 c.tool.mmap2 = process_mmap2_event; 1635 } 1636 1637 err = perf_config(convert__config, &c); 1638 if (err) 1639 return err; 1640 1641 err = -1; 1642 /* perf.data session */ 1643 session = perf_session__new(&data, &c.tool); 1644 if (IS_ERR(session)) 1645 return PTR_ERR(session); 1646 1647 /* CTF writer */ 1648 if (ctf_writer__init(cw, path, session, opts->tod)) 1649 goto free_session; 1650 1651 if (c.queue_size) { 1652 ordered_events__set_alloc_size(&session->ordered_events, 1653 c.queue_size); 1654 } 1655 1656 /* CTF writer env/clock setup */ 1657 if (ctf_writer__setup_env(cw, session)) 1658 goto free_writer; 1659 1660 /* CTF events setup */ 1661 if (setup_events(cw, session)) 1662 goto free_writer; 1663 1664 if (opts->all && setup_non_sample_events(cw, session)) 1665 goto free_writer; 1666 1667 if (setup_streams(cw, session)) 1668 goto free_writer; 1669 1670 err = perf_session__process_events(session); 1671 if (!err) 1672 err = ctf_writer__flush_streams(cw); 1673 else 1674 pr_err("Error during conversion.\n"); 1675 1676 fprintf(stderr, 1677 "[ perf data convert: Converted '%s' into CTF data '%s' ]\n", 1678 data.path, path); 1679 1680 fprintf(stderr, 1681 "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples", 1682 (double) c.events_size / 1024.0 / 1024.0, 1683 c.events_count); 1684 1685 if (!c.non_sample_count) 1686 fprintf(stderr, ") ]\n"); 1687 else 1688 fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count); 1689 1690 cleanup_events(session); 1691 perf_session__delete(session); 1692 ctf_writer__cleanup(cw); 1693 1694 return err; 1695 1696 free_writer: 1697 ctf_writer__cleanup(cw); 1698 free_session: 1699 perf_session__delete(session); 1700 pr_err("Error during conversion setup.\n"); 1701 return err; 1702 } 1703
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.