TOMOYO Linux Cross Reference
Linux/tools/perf/util/session.c


  1 // SPDX-License-Identifier: GPL-2.0
  2 #include <errno.h>
  3 #include <signal.h>
  4 #include <inttypes.h>
  5 #include <linux/err.h>
  6 #include <linux/kernel.h>
  7 #include <linux/zalloc.h>
  8 #include <api/fs/fs.h>
  9 
 10 #include <byteswap.h>
 11 #include <unistd.h>
 12 #include <sys/types.h>
 13 #include <sys/mman.h>
 14 #include <perf/cpumap.h>
 15 
 16 #include "map_symbol.h"
 17 #include "branch.h"
 18 #include "debug.h"
 19 #include "env.h"
 20 #include "evlist.h"
 21 #include "evsel.h"
 22 #include "memswap.h"
 23 #include "map.h"
 24 #include "symbol.h"
 25 #include "session.h"
 26 #include "tool.h"
 27 #include "perf_regs.h"
 28 #include "asm/bug.h"
 29 #include "auxtrace.h"
 30 #include "thread.h"
 31 #include "thread-stack.h"
 32 #include "sample-raw.h"
 33 #include "stat.h"
 34 #include "tsc.h"
 35 #include "ui/progress.h"
 36 #include "util.h"
 37 #include "arch/common.h"
 38 #include "units.h"
 39 #include <internal/lib.h>
 40 
 41 #ifdef HAVE_ZSTD_SUPPORT
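/*
 * Decompress a PERF_RECORD_COMPRESSED payload into an anonymous mmap'd
 * "decomp" buffer chained onto the session. Any unconsumed tail of the
 * previous buffer is copied to the front first, so a record that
 * straddles two compressed chunks can be reassembled and parsed whole.
 */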
 42 static int perf_session__process_compressed_event(struct perf_session *session,
 43                                                   union perf_event *event, u64 file_offset,
 44                                                   const char *file_path)
 45 {
 46         void *src;
 47         size_t decomp_size, src_size;
 48         u64 decomp_last_rem = 0;
 49         size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
 50         struct decomp *decomp, *decomp_last = session->active_decomp->decomp_last;
 51 
 52         if (decomp_last) {
 53                 decomp_last_rem = decomp_last->size - decomp_last->head;
 54                 decomp_len += decomp_last_rem;
 55         }
 56 
 57         mmap_len = sizeof(struct decomp) + decomp_len;
 58         decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
 59                       MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
 60         if (decomp == MAP_FAILED) {
 61                 pr_err("Couldn't allocate memory for decompression\n");
 62                 return -1;
 63         }
 64 
 65         decomp->file_pos = file_offset;
 66         decomp->file_path = file_path;
 67         decomp->mmap_len = mmap_len;
 68         decomp->head = 0;
 69 
 70         if (decomp_last_rem) {
 71                 memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
 72                 decomp->size = decomp_last_rem;
 73         }
 74 
 75         src = (void *)event + sizeof(struct perf_record_compressed);
 76         src_size = event->pack.header.size - sizeof(struct perf_record_compressed);
 77 
 78         decomp_size = zstd_decompress_stream(session->active_decomp->zstd_decomp, src, src_size,
 79                                 &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
 80         if (!decomp_size) {
 81                 munmap(decomp, mmap_len);
 82                 pr_err("Couldn't decompress data\n");
 83                 return -1;
 84         }
 85 
 86         decomp->size += decomp_size;
 87 
 88         if (session->active_decomp->decomp == NULL)
 89                 session->active_decomp->decomp = decomp;
 90         else
 91                 session->active_decomp->decomp_last->next = decomp;
 92 
 93         session->active_decomp->decomp_last = decomp;
 94 
 95         pr_debug("decomp (B): %zu to %zu\n", src_size, decomp_size);
 96 
 97         return 0;
 98 }
 99 #else /* !HAVE_ZSTD_SUPPORT */
100 #define perf_session__process_compressed_event perf_session__process_compressed_event_stub
101 #endif
102 
103 static int perf_session__deliver_event(struct perf_session *session,
104                                        union perf_event *event,
105                                        struct perf_tool *tool,
106                                        u64 file_offset,
107                                        const char *file_path);
108 
109 static int perf_session__open(struct perf_session *session, int repipe_fd)
110 {
111         struct perf_data *data = session->data;
112 
113         if (perf_session__read_header(session, repipe_fd) < 0) {
114                 pr_err("incompatible file format (rerun with -v to learn more)\n");
115                 return -1;
116         }
117 
118         if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) {
119                 /* Auxiliary events may reference exited threads, hold onto dead ones. */
120                 symbol_conf.keep_exited_threads = true;
121         }
122 
123         if (perf_data__is_pipe(data))
124                 return 0;
125 
126         if (perf_header__has_feat(&session->header, HEADER_STAT))
127                 return 0;
128 
129         if (!evlist__valid_sample_type(session->evlist)) {
130                 pr_err("non matching sample_type\n");
131                 return -1;
132         }
133 
134         if (!evlist__valid_sample_id_all(session->evlist)) {
135                 pr_err("non matching sample_id_all\n");
136                 return -1;
137         }
138 
139         if (!evlist__valid_read_format(session->evlist)) {
140                 pr_err("non matching read_format\n");
141                 return -1;
142         }
143 
144         return 0;
145 }
146 
147 void perf_session__set_id_hdr_size(struct perf_session *session)
148 {
149         u16 id_hdr_size = evlist__id_hdr_size(session->evlist);
150 
151         machines__set_id_hdr_size(&session->machines, id_hdr_size);
152 }
153 
154 int perf_session__create_kernel_maps(struct perf_session *session)
155 {
156         int ret = machine__create_kernel_maps(&session->machines.host);
157 
158         if (ret >= 0)
159                 ret = machines__create_guest_kernel_maps(&session->machines);
160         return ret;
161 }
162 
163 static void perf_session__destroy_kernel_maps(struct perf_session *session)
164 {
165         machines__destroy_kernel_maps(&session->machines);
166 }
167 
168 static bool perf_session__has_comm_exec(struct perf_session *session)
169 {
170         struct evsel *evsel;
171 
172         evlist__for_each_entry(session->evlist, evsel) {
173                 if (evsel->core.attr.comm_exec)
174                         return true;
175         }
176 
177         return false;
178 }
179 
180 static void perf_session__set_comm_exec(struct perf_session *session)
181 {
182         bool comm_exec = perf_session__has_comm_exec(session);
183 
184         machines__set_comm_exec(&session->machines, comm_exec);
185 }
186 
187 static int ordered_events__deliver_event(struct ordered_events *oe,
188                                          struct ordered_event *event)
189 {
190         struct perf_session *session = container_of(oe, struct perf_session,
191                                                     ordered_events);
192 
193         return perf_session__deliver_event(session, event->event,
194                                            session->tool, event->file_offset,
195                                            event->file_path);
196 }
197 
198 struct perf_session *__perf_session__new(struct perf_data *data,
199                                          bool repipe, int repipe_fd,
200                                          struct perf_tool *tool)
201 {
202         int ret = -ENOMEM;
203         struct perf_session *session = zalloc(sizeof(*session));
204 
205         if (!session)
206                 goto out;
207 
208         session->repipe = repipe;
209         session->tool   = tool;
210         session->decomp_data.zstd_decomp = &session->zstd_data;
211         session->active_decomp = &session->decomp_data;
212         INIT_LIST_HEAD(&session->auxtrace_index);
213         machines__init(&session->machines);
214         ordered_events__init(&session->ordered_events,
215                              ordered_events__deliver_event, NULL);
216 
217         perf_env__init(&session->header.env);
218         if (data) {
219                 ret = perf_data__open(data);
220                 if (ret < 0)
221                         goto out_delete;
222 
223                 session->data = data;
224 
225                 if (perf_data__is_read(data)) {
226                         ret = perf_session__open(session, repipe_fd);
227                         if (ret < 0)
228                                 goto out_delete;
229 
230                         /*
231                          * set session attributes that are present in perf.data
232                          * but not in pipe-mode.
233                          */
234                         if (!data->is_pipe) {
235                                 perf_session__set_id_hdr_size(session);
236                                 perf_session__set_comm_exec(session);
237                         }
238 
239                         evlist__init_trace_event_sample_raw(session->evlist);
240 
241                         /* Open the directory data. */
242                         if (data->is_dir) {
243                                 ret = perf_data__open_dir(data);
244                                 if (ret)
245                                         goto out_delete;
246                         }
247 
248                         if (!symbol_conf.kallsyms_name &&
249                             !symbol_conf.vmlinux_name)
250                                 symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
251                 }
252         } else  {
253                 session->machines.host.env = &perf_env;
254         }
255 
256         session->machines.host.single_address_space =
257                 perf_env__single_address_space(session->machines.host.env);
258 
259         if (!data || perf_data__is_write(data)) {
260                 /*
261                  * In O_RDONLY mode this will be performed when reading the
262                  * kernel MMAP event, in perf_event__process_mmap().
263                  */
264                 if (perf_session__create_kernel_maps(session) < 0)
265                         pr_warning("Cannot read kernel map\n");
266         }
267 
268         /*
269          * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
270          * processed, so evlist__sample_id_all is not meaningful here.
271          */
272         if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
273             tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
274                 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
275                 tool->ordered_events = false;
276         }
277 
278         return session;
279 
280  out_delete:
281         perf_session__delete(session);
282  out:
283         return ERR_PTR(ret);
284 }
285 
286 static void perf_decomp__release_events(struct decomp *next)
287 {
288         struct decomp *decomp;
289         size_t mmap_len;
290 
291         do {
292                 decomp = next;
293                 if (decomp == NULL)
294                         break;
295                 next = decomp->next;
296                 mmap_len = decomp->mmap_len;
297                 munmap(decomp, mmap_len);
298         } while (1);
299 }
300 
301 void perf_session__delete(struct perf_session *session)
302 {
303         if (session == NULL)
304                 return;
305         auxtrace__free(session);
306         auxtrace_index__free(&session->auxtrace_index);
307         perf_session__destroy_kernel_maps(session);
308         perf_decomp__release_events(session->decomp_data.decomp);
309         perf_env__exit(&session->header.env);
310         machines__exit(&session->machines);
311         if (session->data) {
312                 if (perf_data__is_read(session->data))
313                         evlist__delete(session->evlist);
314                 perf_data__close(session->data);
315         }
316 #ifdef HAVE_LIBTRACEEVENT
317         trace_event__cleanup(&session->tevent);
318 #endif
319         free(session);
320 }
321 
322 static int process_event_synth_tracing_data_stub(struct perf_session *session
323                                                  __maybe_unused,
324                                                  union perf_event *event
325                                                  __maybe_unused)
326 {
327         dump_printf(": unhandled!\n");
328         return 0;
329 }
330 
331 static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
332                                          union perf_event *event __maybe_unused,
333                                          struct evlist **pevlist
334                                          __maybe_unused)
335 {
336         dump_printf(": unhandled!\n");
337         return 0;
338 }
339 
340 static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
341                                                  union perf_event *event __maybe_unused,
342                                                  struct evlist **pevlist
343                                                  __maybe_unused)
344 {
345         if (dump_trace)
346                 perf_event__fprintf_event_update(event, stdout);
347 
348         dump_printf(": unhandled!\n");
349         return 0;
350 }
351 
352 static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
353                                      union perf_event *event __maybe_unused,
354                                      struct perf_sample *sample __maybe_unused,
355                                      struct evsel *evsel __maybe_unused,
356                                      struct machine *machine __maybe_unused)
357 {
358         dump_printf(": unhandled!\n");
359         return 0;
360 }
361 
362 static int process_event_stub(struct perf_tool *tool __maybe_unused,
363                               union perf_event *event __maybe_unused,
364                               struct perf_sample *sample __maybe_unused,
365                               struct machine *machine __maybe_unused)
366 {
367         dump_printf(": unhandled!\n");
368         return 0;
369 }
370 
371 static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
372                                        union perf_event *event __maybe_unused,
373                                        struct ordered_events *oe __maybe_unused)
374 {
375         dump_printf(": unhandled!\n");
376         return 0;
377 }
378 
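/* Read and discard n bytes from fd: a pipe cannot seek past unconsumed data. */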
379 static int skipn(int fd, off_t n)
380 {
381         char buf[4096];
382         ssize_t ret;
383 
384         while (n > 0) {
385                 ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
386                 if (ret <= 0)
387                         return ret;
388                 n -= ret;
389         }
390 
391         return 0;
392 }
393 
394 static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
395                                        union perf_event *event)
396 {
397         dump_printf(": unhandled!\n");
398         if (perf_data__is_pipe(session->data))
399                 skipn(perf_data__fd(session->data), event->auxtrace.size);
400         return event->auxtrace.size;
401 }
402 
403 static int process_event_op2_stub(struct perf_session *session __maybe_unused,
404                                   union perf_event *event __maybe_unused)
405 {
406         dump_printf(": unhandled!\n");
407         return 0;
408 }
409 
410 
411 static
412 int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
413                                   union perf_event *event __maybe_unused)
414 {
415         if (dump_trace)
416                 perf_event__fprintf_thread_map(event, stdout);
417 
418         dump_printf(": unhandled!\n");
419         return 0;
420 }
421 
422 static
423 int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
424                                union perf_event *event __maybe_unused)
425 {
426         if (dump_trace)
427                 perf_event__fprintf_cpu_map(event, stdout);
428 
429         dump_printf(": unhandled!\n");
430         return 0;
431 }
432 
433 static
434 int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
435                                    union perf_event *event __maybe_unused)
436 {
437         if (dump_trace)
438                 perf_event__fprintf_stat_config(event, stdout);
439 
440         dump_printf(": unhandled!\n");
441         return 0;
442 }
443 
444 static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
445                              union perf_event *event)
446 {
447         if (dump_trace)
448                 perf_event__fprintf_stat(event, stdout);
449 
450         dump_printf(": unhandled!\n");
451         return 0;
452 }
453 
454 static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
455                                    union perf_event *event)
456 {
457         if (dump_trace)
458                 perf_event__fprintf_stat_round(event, stdout);
459 
460         dump_printf(": unhandled!\n");
461         return 0;
462 }
463 
464 static int process_event_time_conv_stub(struct perf_session *perf_session __maybe_unused,
465                                         union perf_event *event)
466 {
467         if (dump_trace)
468                 perf_event__fprintf_time_conv(event, stdout);
469 
470         dump_printf(": unhandled!\n");
471         return 0;
472 }
473 
474 static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
475                                                        union perf_event *event __maybe_unused,
476                                                        u64 file_offset __maybe_unused,
477                                                        const char *file_path __maybe_unused)
478 {
479         dump_printf(": unhandled!\n");
480         return 0;
481 }
482 
483 void perf_tool__fill_defaults(struct perf_tool *tool)
484 {
485         if (tool->sample == NULL)
486                 tool->sample = process_event_sample_stub;
487         if (tool->mmap == NULL)
488                 tool->mmap = process_event_stub;
489         if (tool->mmap2 == NULL)
490                 tool->mmap2 = process_event_stub;
491         if (tool->comm == NULL)
492                 tool->comm = process_event_stub;
493         if (tool->namespaces == NULL)
494                 tool->namespaces = process_event_stub;
495         if (tool->cgroup == NULL)
496                 tool->cgroup = process_event_stub;
497         if (tool->fork == NULL)
498                 tool->fork = process_event_stub;
499         if (tool->exit == NULL)
500                 tool->exit = process_event_stub;
501         if (tool->lost == NULL)
502                 tool->lost = perf_event__process_lost;
503         if (tool->lost_samples == NULL)
504                 tool->lost_samples = perf_event__process_lost_samples;
505         if (tool->aux == NULL)
506                 tool->aux = perf_event__process_aux;
507         if (tool->itrace_start == NULL)
508                 tool->itrace_start = perf_event__process_itrace_start;
509         if (tool->context_switch == NULL)
510                 tool->context_switch = perf_event__process_switch;
511         if (tool->ksymbol == NULL)
512                 tool->ksymbol = perf_event__process_ksymbol;
513         if (tool->bpf == NULL)
514                 tool->bpf = perf_event__process_bpf;
515         if (tool->text_poke == NULL)
516                 tool->text_poke = perf_event__process_text_poke;
517         if (tool->aux_output_hw_id == NULL)
518                 tool->aux_output_hw_id = perf_event__process_aux_output_hw_id;
519         if (tool->read == NULL)
520                 tool->read = process_event_sample_stub;
521         if (tool->throttle == NULL)
522                 tool->throttle = process_event_stub;
523         if (tool->unthrottle == NULL)
524                 tool->unthrottle = process_event_stub;
525         if (tool->attr == NULL)
526                 tool->attr = process_event_synth_attr_stub;
527         if (tool->event_update == NULL)
528                 tool->event_update = process_event_synth_event_update_stub;
529         if (tool->tracing_data == NULL)
530                 tool->tracing_data = process_event_synth_tracing_data_stub;
531         if (tool->build_id == NULL)
532                 tool->build_id = process_event_op2_stub;
533         if (tool->finished_round == NULL) {
534                 if (tool->ordered_events)
535                         tool->finished_round = perf_event__process_finished_round;
536                 else
537                         tool->finished_round = process_finished_round_stub;
538         }
539         if (tool->id_index == NULL)
540                 tool->id_index = process_event_op2_stub;
541         if (tool->auxtrace_info == NULL)
542                 tool->auxtrace_info = process_event_op2_stub;
543         if (tool->auxtrace == NULL)
544                 tool->auxtrace = process_event_auxtrace_stub;
545         if (tool->auxtrace_error == NULL)
546                 tool->auxtrace_error = process_event_op2_stub;
547         if (tool->thread_map == NULL)
548                 tool->thread_map = process_event_thread_map_stub;
549         if (tool->cpu_map == NULL)
550                 tool->cpu_map = process_event_cpu_map_stub;
551         if (tool->stat_config == NULL)
552                 tool->stat_config = process_event_stat_config_stub;
553         if (tool->stat == NULL)
554                 tool->stat = process_stat_stub;
555         if (tool->stat_round == NULL)
556                 tool->stat_round = process_stat_round_stub;
557         if (tool->time_conv == NULL)
558                 tool->time_conv = process_event_time_conv_stub;
559         if (tool->feature == NULL)
560                 tool->feature = process_event_op2_stub;
561         if (tool->compressed == NULL)
562                 tool->compressed = perf_session__process_compressed_event;
563         if (tool->finished_init == NULL)
564                 tool->finished_init = process_event_op2_stub;
565 }
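/*
 * After filling defaults every callback above is non-NULL, so the
 * dispatch code can invoke the handlers unconditionally rather than
 * testing each pointer on every event.
 */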
566 
567 static void swap_sample_id_all(union perf_event *event, void *data)
568 {
569         void *end = (void *) event + event->header.size;
570         int size = end - data;
571 
572         BUG_ON(size % sizeof(u64));
573         mem_bswap_64(data, size);
574 }
575 
576 static void perf_event__all64_swap(union perf_event *event,
577                                    bool sample_id_all __maybe_unused)
578 {
579         struct perf_event_header *hdr = &event->header;
580         mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
581 }
582 
583 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
584 {
585         event->comm.pid = bswap_32(event->comm.pid);
586         event->comm.tid = bswap_32(event->comm.tid);
587 
588         if (sample_id_all) {
589                 void *data = &event->comm.comm;
590 
591                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
592                 swap_sample_id_all(event, data);
593         }
594 }
595 
596 static void perf_event__mmap_swap(union perf_event *event,
597                                   bool sample_id_all)
598 {
599         event->mmap.pid   = bswap_32(event->mmap.pid);
600         event->mmap.tid   = bswap_32(event->mmap.tid);
601         event->mmap.start = bswap_64(event->mmap.start);
602         event->mmap.len   = bswap_64(event->mmap.len);
603         event->mmap.pgoff = bswap_64(event->mmap.pgoff);
604 
605         if (sample_id_all) {
606                 void *data = &event->mmap.filename;
607 
608                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
609                 swap_sample_id_all(event, data);
610         }
611 }
612 
613 static void perf_event__mmap2_swap(union perf_event *event,
614                                   bool sample_id_all)
615 {
616         event->mmap2.pid   = bswap_32(event->mmap2.pid);
617         event->mmap2.tid   = bswap_32(event->mmap2.tid);
618         event->mmap2.start = bswap_64(event->mmap2.start);
619         event->mmap2.len   = bswap_64(event->mmap2.len);
620         event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
621 
622         if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
623                 event->mmap2.maj   = bswap_32(event->mmap2.maj);
624                 event->mmap2.min   = bswap_32(event->mmap2.min);
625                 event->mmap2.ino   = bswap_64(event->mmap2.ino);
626                 event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
627         }
628 
629         if (sample_id_all) {
630                 void *data = &event->mmap2.filename;
631 
632                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
633                 swap_sample_id_all(event, data);
634         }
635 }
636 static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
637 {
638         event->fork.pid  = bswap_32(event->fork.pid);
639         event->fork.tid  = bswap_32(event->fork.tid);
640         event->fork.ppid = bswap_32(event->fork.ppid);
641         event->fork.ptid = bswap_32(event->fork.ptid);
642         event->fork.time = bswap_64(event->fork.time);
643 
644         if (sample_id_all)
645                 swap_sample_id_all(event, &event->fork + 1);
646 }
647 
648 static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
649 {
650         event->read.pid          = bswap_32(event->read.pid);
651         event->read.tid          = bswap_32(event->read.tid);
652         event->read.value        = bswap_64(event->read.value);
653         event->read.time_enabled = bswap_64(event->read.time_enabled);
654         event->read.time_running = bswap_64(event->read.time_running);
655         event->read.id           = bswap_64(event->read.id);
656 
657         if (sample_id_all)
658                 swap_sample_id_all(event, &event->read + 1);
659 }
660 
661 static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
662 {
663         event->aux.aux_offset = bswap_64(event->aux.aux_offset);
664         event->aux.aux_size   = bswap_64(event->aux.aux_size);
665         event->aux.flags      = bswap_64(event->aux.flags);
666 
667         if (sample_id_all)
668                 swap_sample_id_all(event, &event->aux + 1);
669 }
670 
671 static void perf_event__itrace_start_swap(union perf_event *event,
672                                           bool sample_id_all)
673 {
674         event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
675         event->itrace_start.tid  = bswap_32(event->itrace_start.tid);
676 
677         if (sample_id_all)
678                 swap_sample_id_all(event, &event->itrace_start + 1);
679 }
680 
681 static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
682 {
683         if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
684                 event->context_switch.next_prev_pid =
685                                 bswap_32(event->context_switch.next_prev_pid);
686                 event->context_switch.next_prev_tid =
687                                 bswap_32(event->context_switch.next_prev_tid);
688         }
689 
690         if (sample_id_all)
691                 swap_sample_id_all(event, &event->context_switch + 1);
692 }
693 
694 static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
695 {
696         event->text_poke.addr    = bswap_64(event->text_poke.addr);
697         event->text_poke.old_len = bswap_16(event->text_poke.old_len);
698         event->text_poke.new_len = bswap_16(event->text_poke.new_len);
699 
700         if (sample_id_all) {
701                 size_t len = sizeof(event->text_poke.old_len) +
702                              sizeof(event->text_poke.new_len) +
703                              event->text_poke.old_len +
704                              event->text_poke.new_len;
705                 void *data = &event->text_poke.old_len;
706 
707                 data += PERF_ALIGN(len, sizeof(u64));
708                 swap_sample_id_all(event, data);
709         }
710 }
711 
712 static void perf_event__throttle_swap(union perf_event *event,
713                                       bool sample_id_all)
714 {
715         event->throttle.time      = bswap_64(event->throttle.time);
716         event->throttle.id        = bswap_64(event->throttle.id);
717         event->throttle.stream_id = bswap_64(event->throttle.stream_id);
718 
719         if (sample_id_all)
720                 swap_sample_id_all(event, &event->throttle + 1);
721 }
722 
723 static void perf_event__namespaces_swap(union perf_event *event,
724                                         bool sample_id_all)
725 {
726         u64 i;
727 
728         event->namespaces.pid           = bswap_32(event->namespaces.pid);
729         event->namespaces.tid           = bswap_32(event->namespaces.tid);
730         event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);
731 
732         for (i = 0; i < event->namespaces.nr_namespaces; i++) {
733                 struct perf_ns_link_info *ns = &event->namespaces.link_info[i];
734 
735                 ns->dev = bswap_64(ns->dev);
736                 ns->ino = bswap_64(ns->ino);
737         }
738 
739         if (sample_id_all)
740                 swap_sample_id_all(event, &event->namespaces.link_info[i]);
741 }
742 
743 static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
744 {
745         event->cgroup.id = bswap_64(event->cgroup.id);
746 
747         if (sample_id_all) {
748                 void *data = &event->cgroup.path;
749 
750                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
751                 swap_sample_id_all(event, data);
752         }
753 }
754 
755 static u8 revbyte(u8 b)
756 {
757         int rev = (b >> 4) | ((b & 0xf) << 4);
758         rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
759         rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
760         return (u8) rev;
761 }
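/* e.g. revbyte(0xb1) == 0x8d: the bit pattern 10110001 becomes 10001101 */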
762 
763 /*
764  * XXX this is a hack in an attempt to carry the flags bitfield
765  * through the endian village. The ABI says:
766  *
767  * Bit-fields are allocated from right to left (least to most significant)
768  * on little-endian implementations and from left to right (most to least
769  * significant) on big-endian implementations.
770  *
771  * The above seems to be byte specific, so we need to reverse each
772  * byte of the bitfield. The 'Internet' also says this might be
773  * implementation specific and we probably need a proper fix: carry the
774  * perf_event_attr bitfield flags in a separate data file FEAT_ section.
775  * Though this seems to work for now.
776  */
777 static void swap_bitfield(u8 *p, unsigned len)
778 {
779         unsigned i;
780 
781         for (i = 0; i < len; i++) {
782                 *p = revbyte(*p);
783                 p++;
784         }
785 }
786 
787 /* exported for swapping attributes in file header */
788 void perf_event__attr_swap(struct perf_event_attr *attr)
789 {
790         attr->type              = bswap_32(attr->type);
791         attr->size              = bswap_32(attr->size);
792 
793 #define bswap_safe(f, n)                                        \
794         (attr->size > (offsetof(struct perf_event_attr, f) +    \
795                        sizeof(attr->f) * (n)))
796 #define bswap_field(f, sz)                      \
797 do {                                            \
798         if (bswap_safe(f, 0))                   \
799                 attr->f = bswap_##sz(attr->f);  \
800 } while(0)
801 #define bswap_field_16(f) bswap_field(f, 16)
802 #define bswap_field_32(f) bswap_field(f, 32)
803 #define bswap_field_64(f) bswap_field(f, 64)
804 
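        /*
         * bswap_safe(f, 0) checks that field f starts within the attr
         * size actually recorded, so attrs written by an older, smaller
         * struct perf_event_attr are never swapped past their real end.
         */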
805         bswap_field_64(config);
806         bswap_field_64(sample_period);
807         bswap_field_64(sample_type);
808         bswap_field_64(read_format);
809         bswap_field_32(wakeup_events);
810         bswap_field_32(bp_type);
811         bswap_field_64(bp_addr);
812         bswap_field_64(bp_len);
813         bswap_field_64(branch_sample_type);
814         bswap_field_64(sample_regs_user);
815         bswap_field_32(sample_stack_user);
816         bswap_field_32(aux_watermark);
817         bswap_field_16(sample_max_stack);
818         bswap_field_32(aux_sample_size);
819 
820         /*
821          * After read_format are bitfields. Check read_format because
822          * we are unable to use offsetof on bitfield.
823          */
824         if (bswap_safe(read_format, 1))
825                 swap_bitfield((u8 *) (&attr->read_format + 1),
826                               sizeof(u64));
827 #undef bswap_field_64
828 #undef bswap_field_32
829 #undef bswap_field
830 #undef bswap_safe
831 }
832 
833 static void perf_event__hdr_attr_swap(union perf_event *event,
834                                       bool sample_id_all __maybe_unused)
835 {
836         size_t size;
837 
838         perf_event__attr_swap(&event->attr.attr);
839 
840         size = event->header.size;
841         size -= perf_record_header_attr_id(event) - (void *)event;
842         mem_bswap_64(perf_record_header_attr_id(event), size);
843 }
844 
845 static void perf_event__event_update_swap(union perf_event *event,
846                                           bool sample_id_all __maybe_unused)
847 {
848         event->event_update.type = bswap_64(event->event_update.type);
849         event->event_update.id   = bswap_64(event->event_update.id);
850 }
851 
852 static void perf_event__event_type_swap(union perf_event *event,
853                                         bool sample_id_all __maybe_unused)
854 {
855         event->event_type.event_type.event_id =
856                 bswap_64(event->event_type.event_type.event_id);
857 }
858 
859 static void perf_event__tracing_data_swap(union perf_event *event,
860                                           bool sample_id_all __maybe_unused)
861 {
862         event->tracing_data.size = bswap_32(event->tracing_data.size);
863 }
864 
865 static void perf_event__auxtrace_info_swap(union perf_event *event,
866                                            bool sample_id_all __maybe_unused)
867 {
868         size_t size;
869 
870         event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
871 
872         size = event->header.size;
873         size -= (void *)&event->auxtrace_info.priv - (void *)event;
874         mem_bswap_64(event->auxtrace_info.priv, size);
875 }
876 
877 static void perf_event__auxtrace_swap(union perf_event *event,
878                                       bool sample_id_all __maybe_unused)
879 {
880         event->auxtrace.size      = bswap_64(event->auxtrace.size);
881         event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
882         event->auxtrace.reference = bswap_64(event->auxtrace.reference);
883         event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
884         event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
885         event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
886 }
887 
888 static void perf_event__auxtrace_error_swap(union perf_event *event,
889                                             bool sample_id_all __maybe_unused)
890 {
891         event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
892         event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
893         event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
894         event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
895         event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
896         event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
897         event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
898         if (event->auxtrace_error.fmt)
899                 event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
900         if (event->auxtrace_error.fmt >= 2) {
901                 event->auxtrace_error.machine_pid = bswap_32(event->auxtrace_error.machine_pid);
902                 event->auxtrace_error.vcpu = bswap_32(event->auxtrace_error.vcpu);
903         }
904 }
905 
906 static void perf_event__thread_map_swap(union perf_event *event,
907                                         bool sample_id_all __maybe_unused)
908 {
909         unsigned i;
910 
911         event->thread_map.nr = bswap_64(event->thread_map.nr);
912 
913         for (i = 0; i < event->thread_map.nr; i++)
914                 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
915 }
916 
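/*
 * A cpu map arrives in one of three encodings (an explicit cpu list, a
 * bitmask of 32- or 64-bit words, or an inclusive cpu range); only the
 * fields of the decoded variant are byte-swapped.
 */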
917 static void perf_event__cpu_map_swap(union perf_event *event,
918                                      bool sample_id_all __maybe_unused)
919 {
920         struct perf_record_cpu_map_data *data = &event->cpu_map.data;
921 
922         data->type = bswap_16(data->type);
923 
924         switch (data->type) {
925         case PERF_CPU_MAP__CPUS:
926                 data->cpus_data.nr = bswap_16(data->cpus_data.nr);
927 
928                 for (unsigned i = 0; i < data->cpus_data.nr; i++)
929                         data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]);
930                 break;
931         case PERF_CPU_MAP__MASK:
932                 data->mask32_data.long_size = bswap_16(data->mask32_data.long_size);
933 
934                 switch (data->mask32_data.long_size) {
935                 case 4:
936                         data->mask32_data.nr = bswap_16(data->mask32_data.nr);
937                         for (unsigned i = 0; i < data->mask32_data.nr; i++)
938                                 data->mask32_data.mask[i] = bswap_32(data->mask32_data.mask[i]);
939                         break;
940                 case 8:
941                         data->mask64_data.nr = bswap_16(data->mask64_data.nr);
942                         for (unsigned i = 0; i < data->mask64_data.nr; i++)
943                                 data->mask64_data.mask[i] = bswap_64(data->mask64_data.mask[i]);
944                         break;
945                 default:
946                         pr_err("cpu_map swap: unsupported long size\n");
947                 }
948                 break;
949         case PERF_CPU_MAP__RANGE_CPUS:
950                 data->range_cpu_data.start_cpu = bswap_16(data->range_cpu_data.start_cpu);
951                 data->range_cpu_data.end_cpu = bswap_16(data->range_cpu_data.end_cpu);
952                 break;
953         default:
954                 break;
955         }
956 }
957 
958 static void perf_event__stat_config_swap(union perf_event *event,
959                                          bool sample_id_all __maybe_unused)
960 {
961         u64 size;
962 
963         size  = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
964         size += 1; /* nr item itself: one extra byte makes mem_bswap_64() swap one more u64 */
965         mem_bswap_64(&event->stat_config.nr, size);
966 }
967 
968 static void perf_event__stat_swap(union perf_event *event,
969                                   bool sample_id_all __maybe_unused)
970 {
971         event->stat.id     = bswap_64(event->stat.id);
972         event->stat.thread = bswap_32(event->stat.thread);
973         event->stat.cpu    = bswap_32(event->stat.cpu);
974         event->stat.val    = bswap_64(event->stat.val);
975         event->stat.ena    = bswap_64(event->stat.ena);
976         event->stat.run    = bswap_64(event->stat.run);
977 }
978 
979 static void perf_event__stat_round_swap(union perf_event *event,
980                                         bool sample_id_all __maybe_unused)
981 {
982         event->stat_round.type = bswap_64(event->stat_round.type);
983         event->stat_round.time = bswap_64(event->stat_round.time);
984 }
985 
986 static void perf_event__time_conv_swap(union perf_event *event,
987                                        bool sample_id_all __maybe_unused)
988 {
989         event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
990         event->time_conv.time_mult  = bswap_64(event->time_conv.time_mult);
991         event->time_conv.time_zero  = bswap_64(event->time_conv.time_zero);
992 
993         if (event_contains(event->time_conv, time_cycles)) {
994                 event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
995                 event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
996         }
997 }
998 
999 typedef void (*perf_event__swap_op)(union perf_event *event,
1000                                     bool sample_id_all);
1001 
1002 static perf_event__swap_op perf_event__swap_ops[] = {
1003         [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
1004         [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
1005         [PERF_RECORD_COMM]                = perf_event__comm_swap,
1006         [PERF_RECORD_FORK]                = perf_event__task_swap,
1007         [PERF_RECORD_EXIT]                = perf_event__task_swap,
1008         [PERF_RECORD_LOST]                = perf_event__all64_swap,
1009         [PERF_RECORD_READ]                = perf_event__read_swap,
1010         [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
1011         [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
1012         [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
1013         [PERF_RECORD_AUX]                 = perf_event__aux_swap,
1014         [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
1015         [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
1016         [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
1017         [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
1018         [PERF_RECORD_NAMESPACES]          = perf_event__namespaces_swap,
1019         [PERF_RECORD_CGROUP]              = perf_event__cgroup_swap,
1020         [PERF_RECORD_TEXT_POKE]           = perf_event__text_poke_swap,
1021         [PERF_RECORD_AUX_OUTPUT_HW_ID]    = perf_event__all64_swap,
1022         [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
1023         [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
1024         [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
1025         [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
1026         [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
1027         [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
1028         [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
1029         [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
1030         [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
1031         [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
1032         [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
1033         [PERF_RECORD_STAT]                = perf_event__stat_swap,
1034         [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
1035         [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
1036         [PERF_RECORD_TIME_CONV]           = perf_event__time_conv_swap,
1037         [PERF_RECORD_HEADER_MAX]          = NULL,
1038 };
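/*
 * Sketch of the dispatch done by the event_swap() helper further down
 * in this file:
 *
 *	perf_event__swap_op swap = perf_event__swap_ops[event->header.type];
 *	if (swap)
 *		swap(event, sample_id_all);
 */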
1039 
1040 /*
1041  * When perf record finishes a pass over all buffers, it records this
1042  * pseudo event.
1043  * We record the max timestamp t found in pass n.
1044  * Assuming these timestamps are monotonic across cpus, we know that if
1045  * a buffer still has events with timestamps below t, they will all be
1046  * available and read in pass n + 1.
1047  * Hence when we start to read pass n + 2, we can safely flush all
1048  * events with timestamps below t.
1049  *
1050  *    ============ PASS n =================
1051  *       CPU 0         |   CPU 1
1052  *                     |
1053  *    cnt1 timestamps  |   cnt2 timestamps
1054  *          1          |         2
1055  *          2          |         3
1056  *          -          |         4  <--- max recorded
1057  *
1058  *    ============ PASS n + 1 ==============
1059  *       CPU 0         |   CPU 1
1060  *                     |
1061  *    cnt1 timestamps  |   cnt2 timestamps
1062  *          3          |         5
1063  *          4          |         6
1064  *          5          |         7 <---- max recorded
1065  *
1066  *      Flush all events below timestamp 4
1067  *
1068  *    ============ PASS n + 2 ==============
1069  *       CPU 0         |   CPU 1
1070  *                     |
1071  *    cnt1 timestamps  |   cnt2 timestamps
1072  *          6          |         8
1073  *          7          |         9
1074  *          -          |         10
1075  *
1076  *      Flush all events below timestamp 7
1077  *      etc...
1078  */
1079 int perf_event__process_finished_round(struct perf_tool *tool __maybe_unused,
1080                                        union perf_event *event __maybe_unused,
1081                                        struct ordered_events *oe)
1082 {
1083         if (dump_trace)
1084                 fprintf(stdout, "\n");
1085         return ordered_events__flush(oe, OE_FLUSH__ROUND);
1086 }
1087 
1088 int perf_session__queue_event(struct perf_session *s, union perf_event *event,
1089                               u64 timestamp, u64 file_offset, const char *file_path)
1090 {
1091         return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset, file_path);
1092 }
1093 
1094 static void callchain__lbr_callstack_printf(struct perf_sample *sample)
1095 {
1096         struct ip_callchain *callchain = sample->callchain;
1097         struct branch_stack *lbr_stack = sample->branch_stack;
1098         struct branch_entry *entries = perf_sample__branch_entries(sample);
1099         u64 kernel_callchain_nr = callchain->nr;
1100         unsigned int i;
1101 
1102         for (i = 0; i < kernel_callchain_nr; i++) {
1103                 if (callchain->ips[i] == PERF_CONTEXT_USER)
1104                         break;
1105         }
1106 
1107         if ((i != kernel_callchain_nr) && lbr_stack->nr) {
1108                 u64 total_nr;
1109                 /*
1110                  * The LBR callstack covers only the user call chain:
1111                  * i is the number of kernel call chain entries and
1112                  * 1 accounts for the PERF_CONTEXT_USER marker.
1113                  *
1114                  * The user call chain is stored in LBR registers.
1115                  * LBRs are register pairs: the caller is stored in
1116                  * the "from" register, while the callee is stored
1117                  * in the "to" register.
1118                  * For example, for the call stack
1119                  * "A"->"B"->"C"->"D",
1120                  * the LBR registers will be recorded as
1121                  * "C"->"D", "B"->"C", "A"->"B".
1122                  * So only the first "to" register and all "from"
1123                  * registers are needed to reconstruct the whole stack.
1124                  */
1125                 total_nr = i + 1 + lbr_stack->nr + 1;
1126                 kernel_callchain_nr = i + 1;
1127 
1128                 printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);
1129 
1130                 for (i = 0; i < kernel_callchain_nr; i++)
1131                         printf("..... %2d: %016" PRIx64 "\n",
1132                                i, callchain->ips[i]);
1133 
1134                 printf("..... %2d: %016" PRIx64 "\n",
1135                        (int)(kernel_callchain_nr), entries[0].to);
1136                 for (i = 0; i < lbr_stack->nr; i++)
1137                         printf("..... %2d: %016" PRIx64 "\n",
1138                                (int)(i + kernel_callchain_nr + 1), entries[i].from);
1139         }
1140 }
1141 
1142 static void callchain__printf(struct evsel *evsel,
1143                               struct perf_sample *sample)
1144 {
1145         unsigned int i;
1146         struct ip_callchain *callchain = sample->callchain;
1147 
1148         if (evsel__has_branch_callstack(evsel))
1149                 callchain__lbr_callstack_printf(sample);
1150 
1151         printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
1152 
1153         for (i = 0; i < callchain->nr; i++)
1154                 printf("..... %2d: %016" PRIx64 "\n",
1155                        i, callchain->ips[i]);
1156 }
1157 
1158 static void branch_stack__printf(struct perf_sample *sample,
1159                                  struct evsel *evsel)
1160 {
1161         struct branch_entry *entries = perf_sample__branch_entries(sample);
1162         bool callstack = evsel__has_branch_callstack(evsel);
1163         u64 *branch_stack_cntr = sample->branch_stack_cntr;
1164         struct perf_env *env = evsel__env(evsel);
1165         uint64_t i;
1166 
1167         if (!callstack) {
1168                 printf("%s: nr:%" PRIu64 "\n", "... branch stack", sample->branch_stack->nr);
1169         } else {
1170                 /* Add 1 to nr because expanding the branch stack
1171                  * generates nr + 1 callstack records, e.g. for
1172                  *         B()->C()
1173                  *         A()->B()
1174                  * the final callstack should be:
1175                  *         C()
1176                  *         B()
1177                  *         A()
1178                  */
1179                 printf("%s: nr:%" PRIu64 "\n", "... branch callstack", sample->branch_stack->nr+1);
1180         }
1181 
1182         for (i = 0; i < sample->branch_stack->nr; i++) {
1183                 struct branch_entry *e = &entries[i];
1184 
1185                 if (!callstack) {
1186                         printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x %s %s\n",
1187                                 i, e->from, e->to,
1188                                 (unsigned short)e->flags.cycles,
1189                                 e->flags.mispred ? "M" : " ",
1190                                 e->flags.predicted ? "P" : " ",
1191                                 e->flags.abort ? "A" : " ",
1192                                 e->flags.in_tx ? "T" : " ",
1193                                 (unsigned)e->flags.reserved,
1194                                 get_branch_type(e),
1195                                 e->flags.spec ? branch_spec_desc(e->flags.spec) : "");
1196                 } else {
1197                         if (i == 0) {
1198                                 printf("..... %2"PRIu64": %016" PRIx64 "\n"
1199                                        "..... %2"PRIu64": %016" PRIx64 "\n",
1200                                                 i, e->to, i+1, e->from);
1201                         } else {
1202                                 printf("..... %2"PRIu64": %016" PRIx64 "\n", i+1, e->from);
1203                         }
1204                 }
1205         }
1206 
1207         if (branch_stack_cntr) {
1208                 printf("... branch stack counters: nr:%" PRIu64 " (counter width: %u max counter nr:%u)\n",
1209                         sample->branch_stack->nr, env->br_cntr_width, env->br_cntr_nr);
1210                 for (i = 0; i < sample->branch_stack->nr; i++)
1211                         printf("..... %2"PRIu64": %016" PRIx64 "\n", i, branch_stack_cntr[i]);
1212         }
1213 }
1214 
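/*
 * Sampled registers are stored densely: mask says which registers were
 * captured, and regs[] holds one u64 per set mask bit, in bit order.
 */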
1215 static void regs_dump__printf(u64 mask, u64 *regs, const char *arch)
1216 {
1217         unsigned rid, i = 0;
1218 
1219         for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
1220                 u64 val = regs[i++];
1221 
1222                 printf(".... %-5s 0x%016" PRIx64 "\n",
1223                        perf_reg_name(rid, arch), val);
1224         }
1225 }
1226 
1227 static const char *regs_abi[] = {
1228         [PERF_SAMPLE_REGS_ABI_NONE] = "none",
1229         [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
1230         [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
1231 };
1232 
1233 static inline const char *regs_dump_abi(struct regs_dump *d)
1234 {
1235         if (d->abi > PERF_SAMPLE_REGS_ABI_64)
1236                 return "unknown";
1237 
1238         return regs_abi[d->abi];
1239 }
1240 
1241 static void regs__printf(const char *type, struct regs_dump *regs, const char *arch)
1242 {
1243         u64 mask = regs->mask;
1244 
1245         printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
1246                type,
1247                mask,
1248                regs_dump_abi(regs));
1249 
1250         regs_dump__printf(mask, regs->regs, arch);
1251 }
1252 
1253 static void regs_user__printf(struct perf_sample *sample, const char *arch)
1254 {
1255         struct regs_dump *user_regs = &sample->user_regs;
1256 
1257         if (user_regs->regs)
1258                 regs__printf("user", user_regs, arch);
1259 }
1260 
1261 static void regs_intr__printf(struct perf_sample *sample, const char *arch)
1262 {
1263         struct regs_dump *intr_regs = &sample->intr_regs;
1264 
1265         if (intr_regs->regs)
1266                 regs__printf("intr", intr_regs, arch);
1267 }
1268 
1269 static void stack_user__printf(struct stack_dump *dump)
1270 {
1271         printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
1272                dump->size, dump->offset);
1273 }
1274 
1275 static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
1276 {
1277         u64 sample_type = __evlist__combined_sample_type(evlist);
1278 
1279         if (event->header.type != PERF_RECORD_SAMPLE &&
1280             !evlist__sample_id_all(evlist)) {
1281                 fputs("-1 -1 ", stdout);
1282                 return;
1283         }
1284 
1285         if ((sample_type & PERF_SAMPLE_CPU))
1286                 printf("%u ", sample->cpu);
1287 
1288         if (sample_type & PERF_SAMPLE_TIME)
1289                 printf("%" PRIu64 " ", sample->time);
1290 }
1291 
1292 static void sample_read__printf(struct perf_sample *sample, u64 read_format)
1293 {
1294         printf("... sample_read:\n");
1295 
1296         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1297                 printf("...... time enabled %016" PRIx64 "\n",
1298                        sample->read.time_enabled);
1299 
1300         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1301                 printf("...... time running %016" PRIx64 "\n",
1302                        sample->read.time_running);
1303 
1304         if (read_format & PERF_FORMAT_GROUP) {
1305                 struct sample_read_value *value = sample->read.group.values;
1306 
1307                 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
1308 
1309                 sample_read_group__for_each(value, sample->read.group.nr, read_format) {
1310                         printf("..... id %016" PRIx64
1311                                ", value %016" PRIx64,
1312                                value->id, value->value);
1313                         if (read_format & PERF_FORMAT_LOST)
1314                                 printf(", lost %" PRIu64, value->lost);
1315                         printf("\n");
1316                 }
1317         } else {
1318                 printf("..... id %016" PRIx64 ", value %016" PRIx64,
1319                         sample->read.one.id, sample->read.one.value);
1320                 if (read_format & PERF_FORMAT_LOST)
1321                         printf(", lost %" PRIu64, sample->read.one.lost);
1322                 printf("\n");
1323         }
1324 }
1325 
1326 static void dump_event(struct evlist *evlist, union perf_event *event,
1327                        u64 file_offset, struct perf_sample *sample,
1328                        const char *file_path)
1329 {
1330         if (!dump_trace)
1331                 return;
1332 
1333         printf("\n%#" PRIx64 "@%s [%#x]: event: %d\n",
1334                file_offset, file_path, event->header.size, event->header.type);
1335 
1336         trace_event(event);
1337         if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
1338                 evlist->trace_event_sample_raw(evlist, event, sample);
1339 
1340         if (sample)
1341                 evlist__print_tstamp(evlist, event, sample);
1342 
1343         printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
1344                event->header.size, perf_event__name(event->header.type));
1345 }
1346 
1347 char *get_page_size_name(u64 size, char *str)
1348 {
1349         if (!size || !unit_number__scnprintf(str, PAGE_SIZE_NAME_LEN, size))
1350                 snprintf(str, PAGE_SIZE_NAME_LEN, "%s", "N/A");
1351 
1352         return str;
1353 }
1354 
1355 static void dump_sample(struct evsel *evsel, union perf_event *event,
1356                         struct perf_sample *sample, const char *arch)
1357 {
1358         u64 sample_type;
1359         char str[PAGE_SIZE_NAME_LEN];
1360 
1361         if (!dump_trace)
1362                 return;
1363 
1364         printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
1365                event->header.misc, sample->pid, sample->tid, sample->ip,
1366                sample->period, sample->addr);
1367 
1368         sample_type = evsel->core.attr.sample_type;
1369 
1370         if (evsel__has_callchain(evsel))
1371                 callchain__printf(evsel, sample);
1372 
1373         if (evsel__has_br_stack(evsel))
1374                 branch_stack__printf(sample, evsel);
1375 
1376         if (sample_type & PERF_SAMPLE_REGS_USER)
1377                 regs_user__printf(sample, arch);
1378 
1379         if (sample_type & PERF_SAMPLE_REGS_INTR)
1380                 regs_intr__printf(sample, arch);
1381 
1382         if (sample_type & PERF_SAMPLE_STACK_USER)
1383                 stack_user__printf(&sample->user_stack);
1384 
1385         if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
1386                 printf("... weight: %" PRIu64 "", sample->weight);
1387                 if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
1388                         printf(",0x%"PRIx16"", sample->ins_lat);
1389                         printf(",0x%"PRIx16"", sample->p_stage_cyc);
1390                 }
1391                 printf("\n");
1392         }
1393 
1394         if (sample_type & PERF_SAMPLE_DATA_SRC)
1395                 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
1396 
1397         if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1398                 printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);
1399 
1400         if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
1401                 printf(" .. data page size: %s\n", get_page_size_name(sample->data_page_size, str));
1402 
1403         if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
1404                 printf(" .. code page size: %s\n", get_page_size_name(sample->code_page_size, str));
1405 
1406         if (sample_type & PERF_SAMPLE_TRANSACTION)
1407                 printf("... transaction: %" PRIx64 "\n", sample->transaction);
1408 
1409         if (sample_type & PERF_SAMPLE_READ)
1410                 sample_read__printf(sample, evsel->core.attr.read_format);
1411 }
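/*
 * Editor's note (illustrative, not output from a real run): with
 * dump_trace enabled ("perf report -D"), dump_sample() emits one block
 * per sample along the lines of
 *   (IP, 0x2): 1234/1234: 0x401234 period: 100000 addr: 0
 * followed only by the optional lines whose PERF_SAMPLE_* bits are set
 * in attr.sample_type.
 */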
1412 
1413 static void dump_read(struct evsel *evsel, union perf_event *event)
1414 {
1415         struct perf_record_read *read_event = &event->read;
1416         u64 read_format;
1417 
1418         if (!dump_trace)
1419                 return;
1420 
1421         printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
1422                evsel__name(evsel), event->read.value);
1423 
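        /*
         * Editor's note: the NULL check below deliberately comes after the
         * printf above; evsel__name() tolerates a NULL evsel (it prints
         * "unknown"), while the attr.read_format access further down does not.
         */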
1424         if (!evsel)
1425                 return;
1426 
1427         read_format = evsel->core.attr.read_format;
1428 
1429         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1430                 printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);
1431 
1432         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1433                 printf("... time running : %" PRI_lu64 "\n", read_event->time_running);
1434 
1435         if (read_format & PERF_FORMAT_ID)
1436                 printf("... id           : %" PRI_lu64 "\n", read_event->id);
1437 
1438         if (read_format & PERF_FORMAT_LOST)
1439                 printf("... lost         : %" PRI_lu64 "\n", read_event->lost);
1440 }
1441 
1442 static struct machine *machines__find_for_cpumode(struct machines *machines,
1443                                                union perf_event *event,
1444                                                struct perf_sample *sample)
1445 {
1446         if (perf_guest &&
1447             ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
1448              (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
1449                 u32 pid;
1450 
1451                 if (sample->machine_pid)
1452                         pid = sample->machine_pid;
1453                 else if (event->header.type == PERF_RECORD_MMAP
1454                     || event->header.type == PERF_RECORD_MMAP2)
1455                         pid = event->mmap.pid;
1456                 else
1457                         pid = sample->pid;
1458 
1459                 /*
1460                  * Guest code machine is created as needed and does not use
1461                  * DEFAULT_GUEST_KERNEL_ID.
1462                  */
1463                 if (symbol_conf.guest_code)
1464                         return machines__findnew(machines, pid);
1465 
1466                 return machines__find_guest(machines, pid);
1467         }
1468 
1469         return &machines->host;
1470 }
1471 
1472 static int deliver_sample_value(struct evlist *evlist,
1473                                 struct perf_tool *tool,
1474                                 union perf_event *event,
1475                                 struct perf_sample *sample,
1476                                 struct sample_read_value *v,
1477                                 struct machine *machine)
1478 {
1479         struct perf_sample_id *sid = evlist__id2sid(evlist, v->id);
1480         struct evsel *evsel;
1481 
1482         if (sid) {
1483                 sample->id     = v->id;
1484                 sample->period = v->value - sid->period;
1485                 sid->period    = v->value;
1486         }
1487 
1488         if (!sid || sid->evsel == NULL) {
1489                 ++evlist->stats.nr_unknown_id;
1490                 return 0;
1491         }
1492 
1493         /*
1494          * There's no reason to deliver a sample
1495          * with a zero period, so bail out.
1496          */
1497         if (!sample->period)
1498                 return 0;
1499 
1500         evsel = container_of(sid->evsel, struct evsel, core);
1501         return tool->sample(tool, event, sample, evsel, machine);
1502 }
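/*
 * Editor's note: for PERF_SAMPLE_READ the kernel reports running counter
 * totals, not deltas, so deliver_sample_value() reconstructs the period
 * above. E.g. with sid->period cached at 1000 and v->value now 1500, the
 * sample is delivered with period 500 and the cache advances to 1500.
 */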
1503 
1504 static int deliver_sample_group(struct evlist *evlist,
1505                                 struct perf_tool *tool,
1506                                 union  perf_event *event,
1507                                 struct perf_sample *sample,
1508                                 struct machine *machine,
1509                                 u64 read_format)
1510 {
1511         int ret = -EINVAL;
1512         struct sample_read_value *v = sample->read.group.values;
1513 
1514         if (tool->dont_split_sample_group)
1515                 return deliver_sample_value(evlist, tool, event, sample, v, machine);
1516 
1517         sample_read_group__for_each(v, sample->read.group.nr, read_format) {
1518                 ret = deliver_sample_value(evlist, tool, event, sample, v,
1519                                            machine);
1520                 if (ret)
1521                         break;
1522         }
1523 
1524         return ret;
1525 }
1526 
1527 static int evlist__deliver_sample(struct evlist *evlist, struct perf_tool *tool,
1528                                   union  perf_event *event, struct perf_sample *sample,
1529                                   struct evsel *evsel, struct machine *machine)
1530 {
1531         /* We know evsel != NULL. */
1532         u64 sample_type = evsel->core.attr.sample_type;
1533         u64 read_format = evsel->core.attr.read_format;
1534 
1535         /* Standard sample delivery. */
1536         if (!(sample_type & PERF_SAMPLE_READ))
1537                 return tool->sample(tool, event, sample, evsel, machine);
1538 
1539         /* For PERF_SAMPLE_READ we have either single or group mode. */
1540         if (read_format & PERF_FORMAT_GROUP)
1541                 return deliver_sample_group(evlist, tool, event, sample,
1542                                             machine, read_format);
1543         else
1544                 return deliver_sample_value(evlist, tool, event, sample,
1545                                             &sample->read.one, machine);
1546 }
1547 
1548 static int machines__deliver_event(struct machines *machines,
1549                                    struct evlist *evlist,
1550                                    union perf_event *event,
1551                                    struct perf_sample *sample,
1552                                    struct perf_tool *tool, u64 file_offset,
1553                                    const char *file_path)
1554 {
1555         struct evsel *evsel;
1556         struct machine *machine;
1557 
1558         dump_event(evlist, event, file_offset, sample, file_path);
1559 
1560         evsel = evlist__id2evsel(evlist, sample->id);
1561 
1562         machine = machines__find_for_cpumode(machines, event, sample);
1563 
1564         switch (event->header.type) {
1565         case PERF_RECORD_SAMPLE:
1566                 if (evsel == NULL) {
1567                         ++evlist->stats.nr_unknown_id;
1568                         return 0;
1569                 }
1570                 if (machine == NULL) {
1571                         ++evlist->stats.nr_unprocessable_samples;
1572                         dump_sample(evsel, event, sample, perf_env__arch(NULL));
1573                         return 0;
1574                 }
1575                 dump_sample(evsel, event, sample, perf_env__arch(machine->env));
1576                 return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
1577         case PERF_RECORD_MMAP:
1578                 return tool->mmap(tool, event, sample, machine);
1579         case PERF_RECORD_MMAP2:
1580                 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1581                         ++evlist->stats.nr_proc_map_timeout;
1582                 return tool->mmap2(tool, event, sample, machine);
1583         case PERF_RECORD_COMM:
1584                 return tool->comm(tool, event, sample, machine);
1585         case PERF_RECORD_NAMESPACES:
1586                 return tool->namespaces(tool, event, sample, machine);
1587         case PERF_RECORD_CGROUP:
1588                 return tool->cgroup(tool, event, sample, machine);
1589         case PERF_RECORD_FORK:
1590                 return tool->fork(tool, event, sample, machine);
1591         case PERF_RECORD_EXIT:
1592                 return tool->exit(tool, event, sample, machine);
1593         case PERF_RECORD_LOST:
1594                 if (tool->lost == perf_event__process_lost)
1595                         evlist->stats.total_lost += event->lost.lost;
1596                 return tool->lost(tool, event, sample, machine);
1597         case PERF_RECORD_LOST_SAMPLES:
1598                 if (tool->lost_samples == perf_event__process_lost_samples &&
1599                     !(event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF))
1600                         evlist->stats.total_lost_samples += event->lost_samples.lost;
1601                 return tool->lost_samples(tool, event, sample, machine);
1602         case PERF_RECORD_READ:
1603                 dump_read(evsel, event);
1604                 return tool->read(tool, event, sample, evsel, machine);
1605         case PERF_RECORD_THROTTLE:
1606                 return tool->throttle(tool, event, sample, machine);
1607         case PERF_RECORD_UNTHROTTLE:
1608                 return tool->unthrottle(tool, event, sample, machine);
1609         case PERF_RECORD_AUX:
1610                 if (tool->aux == perf_event__process_aux) {
1611                         if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1612                                 evlist->stats.total_aux_lost += 1;
1613                         if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1614                                 evlist->stats.total_aux_partial += 1;
1615                         if (event->aux.flags & PERF_AUX_FLAG_COLLISION)
1616                                 evlist->stats.total_aux_collision += 1;
1617                 }
1618                 return tool->aux(tool, event, sample, machine);
1619         case PERF_RECORD_ITRACE_START:
1620                 return tool->itrace_start(tool, event, sample, machine);
1621         case PERF_RECORD_SWITCH:
1622         case PERF_RECORD_SWITCH_CPU_WIDE:
1623                 return tool->context_switch(tool, event, sample, machine);
1624         case PERF_RECORD_KSYMBOL:
1625                 return tool->ksymbol(tool, event, sample, machine);
1626         case PERF_RECORD_BPF_EVENT:
1627                 return tool->bpf(tool, event, sample, machine);
1628         case PERF_RECORD_TEXT_POKE:
1629                 return tool->text_poke(tool, event, sample, machine);
1630         case PERF_RECORD_AUX_OUTPUT_HW_ID:
1631                 return tool->aux_output_hw_id(tool, event, sample, machine);
1632         default:
1633                 ++evlist->stats.nr_unknown_events;
1634                 return -1;
1635         }
1636 }
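/*
 * Editor's note: the switch above invokes the tool callbacks without NULL
 * checks; this relies on perf_tool__fill_defaults(), run before event
 * processing starts, installing stub handlers for any callback the tool
 * left unset.
 */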
1637 
1638 static int perf_session__deliver_event(struct perf_session *session,
1639                                        union perf_event *event,
1640                                        struct perf_tool *tool,
1641                                        u64 file_offset,
1642                                        const char *file_path)
1643 {
1644         struct perf_sample sample;
1645         int ret = evlist__parse_sample(session->evlist, event, &sample);
1646 
1647         if (ret) {
1648                 pr_err("Can't parse sample, err = %d\n", ret);
1649                 return ret;
1650         }
1651 
1652         ret = auxtrace__process_event(session, event, &sample, tool);
1653         if (ret < 0)
1654                 return ret;
1655         if (ret > 0)
1656                 return 0;
1657 
1658         ret = machines__deliver_event(&session->machines, session->evlist,
1659                                       event, &sample, tool, file_offset, file_path);
1660 
1661         if (dump_trace && sample.aux_sample.size)
1662                 auxtrace__dump_auxtrace_sample(session, &sample);
1663 
1664         return ret;
1665 }
1666 
1667 static s64 perf_session__process_user_event(struct perf_session *session,
1668                                             union perf_event *event,
1669                                             u64 file_offset,
1670                                             const char *file_path)
1671 {
1672         struct ordered_events *oe = &session->ordered_events;
1673         struct perf_tool *tool = session->tool;
1674         struct perf_sample sample = { .time = 0, };
1675         int fd = perf_data__fd(session->data);
1676         int err;
1677 
1678         if (event->header.type != PERF_RECORD_COMPRESSED ||
1679             tool->compressed == perf_session__process_compressed_event_stub)
1680                 dump_event(session->evlist, event, file_offset, &sample, file_path);
1681 
1682         /* These events are processed right away */
1683         switch (event->header.type) {
1684         case PERF_RECORD_HEADER_ATTR:
1685                 err = tool->attr(tool, event, &session->evlist);
1686                 if (err == 0) {
1687                         perf_session__set_id_hdr_size(session);
1688                         perf_session__set_comm_exec(session);
1689                 }
1690                 return err;
1691         case PERF_RECORD_EVENT_UPDATE:
1692                 return tool->event_update(tool, event, &session->evlist);
1693         case PERF_RECORD_HEADER_EVENT_TYPE:
1694                 /*
1695                  * Deprecated, but we need to handle it for the sake
1696                  * of old data files created in pipe mode.
1697                  */
1698                 return 0;
1699         case PERF_RECORD_HEADER_TRACING_DATA:
1700                 /*
1701                  * Set up for reading amidst the mmap, but only when we
1702                  * are in 'file' mode. The 'pipe' fd is already in the
1703                  * proper place.
1704                  */
1705                 if (!perf_data__is_pipe(session->data))
1706                         lseek(fd, file_offset, SEEK_SET);
1707                 return tool->tracing_data(session, event);
1708         case PERF_RECORD_HEADER_BUILD_ID:
1709                 return tool->build_id(session, event);
1710         case PERF_RECORD_FINISHED_ROUND:
1711                 return tool->finished_round(tool, event, oe);
1712         case PERF_RECORD_ID_INDEX:
1713                 return tool->id_index(session, event);
1714         case PERF_RECORD_AUXTRACE_INFO:
1715                 return tool->auxtrace_info(session, event);
1716         case PERF_RECORD_AUXTRACE:
1717                 /*
1718                  * Set up for reading amidst the mmap, but only when we
1719                  * are in 'file' mode.  The 'pipe' fd is already in the
1720                  * proper place.
1721                  */
1722                 if (!perf_data__is_pipe(session->data))
1723                         lseek(fd, file_offset + event->header.size, SEEK_SET);
1724                 return tool->auxtrace(session, event);
1725         case PERF_RECORD_AUXTRACE_ERROR:
1726                 perf_session__auxtrace_error_inc(session, event);
1727                 return tool->auxtrace_error(session, event);
1728         case PERF_RECORD_THREAD_MAP:
1729                 return tool->thread_map(session, event);
1730         case PERF_RECORD_CPU_MAP:
1731                 return tool->cpu_map(session, event);
1732         case PERF_RECORD_STAT_CONFIG:
1733                 return tool->stat_config(session, event);
1734         case PERF_RECORD_STAT:
1735                 return tool->stat(session, event);
1736         case PERF_RECORD_STAT_ROUND:
1737                 return tool->stat_round(session, event);
1738         case PERF_RECORD_TIME_CONV:
1739                 session->time_conv = event->time_conv;
1740                 return tool->time_conv(session, event);
1741         case PERF_RECORD_HEADER_FEATURE:
1742                 return tool->feature(session, event);
1743         case PERF_RECORD_COMPRESSED:
1744                 err = tool->compressed(session, event, file_offset, file_path);
1745                 if (err)
1746                         dump_event(session->evlist, event, file_offset, &sample, file_path);
1747                 return err;
1748         case PERF_RECORD_FINISHED_INIT:
1749                 return tool->finished_init(session, event);
1750         default:
1751                 return -EINVAL;
1752         }
1753 }
1754 
1755 int perf_session__deliver_synth_event(struct perf_session *session,
1756                                       union perf_event *event,
1757                                       struct perf_sample *sample)
1758 {
1759         struct evlist *evlist = session->evlist;
1760         struct perf_tool *tool = session->tool;
1761 
1762         events_stats__inc(&evlist->stats, event->header.type);
1763 
1764         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1765                 return perf_session__process_user_event(session, event, 0, NULL);
1766 
1767         return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0, NULL);
1768 }
1769 
1770 static void event_swap(union perf_event *event, bool sample_id_all)
1771 {
1772         perf_event__swap_op swap;
1773 
1774         swap = perf_event__swap_ops[event->header.type];
1775         if (swap)
1776                 swap(event, sample_id_all);
1777 }
1778 
1779 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1780                              void *buf, size_t buf_sz,
1781                              union perf_event **event_ptr,
1782                              struct perf_sample *sample)
1783 {
1784         union perf_event *event;
1785         size_t hdr_sz, rest;
1786         int fd;
1787 
1788         if (session->one_mmap && !session->header.needs_swap) {
1789                 event = file_offset - session->one_mmap_offset +
1790                         session->one_mmap_addr;
1791                 goto out_parse_sample;
1792         }
1793 
1794         if (perf_data__is_pipe(session->data))
1795                 return -1;
1796 
1797         fd = perf_data__fd(session->data);
1798         hdr_sz = sizeof(struct perf_event_header);
1799 
1800         if (buf_sz < hdr_sz)
1801                 return -1;
1802 
1803         if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1804             readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1805                 return -1;
1806 
1807         event = (union perf_event *)buf;
1808 
1809         if (session->header.needs_swap)
1810                 perf_event_header__bswap(&event->header);
1811 
1812         if (event->header.size < hdr_sz || event->header.size > buf_sz)
1813                 return -1;
1814 
1815         buf += hdr_sz;
1816         rest = event->header.size - hdr_sz;
1817 
1818         if (readn(fd, buf, rest) != (ssize_t)rest)
1819                 return -1;
1820 
1821         if (session->header.needs_swap)
1822                 event_swap(event, evlist__sample_id_all(session->evlist));
1823 
1824 out_parse_sample:
1825 
1826         if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1827             evlist__parse_sample(session->evlist, event, sample))
1828                 return -1;
1829 
1830         *event_ptr = event;
1831 
1832         return 0;
1833 }
1834 
1835 int perf_session__peek_events(struct perf_session *session, u64 offset,
1836                               u64 size, peek_events_cb_t cb, void *data)
1837 {
1838         u64 max_offset = offset + size;
1839         char buf[PERF_SAMPLE_MAX_SIZE];
1840         union perf_event *event;
1841         int err;
1842 
1843         do {
1844                 err = perf_session__peek_event(session, offset, buf,
1845                                                PERF_SAMPLE_MAX_SIZE, &event,
1846                                                NULL);
1847                 if (err)
1848                         return err;
1849 
1850                 err = cb(session, event, offset, data);
1851                 if (err)
1852                         return err;
1853 
1854                 offset += event->header.size;
1855                 if (event->header.type == PERF_RECORD_AUXTRACE)
1856                         offset += event->auxtrace.size;
1857 
1858         } while (offset < max_offset);
1859 
1860         return err;
1861 }
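/*
 * Example (editor's sketch, not part of this file): a minimal
 * peek_events_cb_t callback that counts events by type, with the
 * signature inferred from the cb() invocation above. The names
 * "type_counts" and "count_event_types" are hypothetical.
 */
#if 0
struct type_counts {
	u64 nr[PERF_RECORD_HEADER_MAX];
};

static int count_event_types(struct perf_session *session __maybe_unused,
			     union perf_event *event,
			     u64 offset __maybe_unused, void *data)
{
	struct type_counts *counts = data;

	/* Tally only types we know how to index. */
	if (event->header.type < PERF_RECORD_HEADER_MAX)
		counts->nr[event->header.type]++;
	return 0;
}

/* Usage: perf_session__peek_events(session, offset, size, count_event_types, &counts); */
#endif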
1862 
1863 static s64 perf_session__process_event(struct perf_session *session,
1864                                        union perf_event *event, u64 file_offset,
1865                                        const char *file_path)
1866 {
1867         struct evlist *evlist = session->evlist;
1868         struct perf_tool *tool = session->tool;
1869         int ret;
1870 
1871         if (session->header.needs_swap)
1872                 event_swap(event, evlist__sample_id_all(evlist));
1873 
1874         if (event->header.type >= PERF_RECORD_HEADER_MAX)
1875                 return -EINVAL;
1876 
1877         events_stats__inc(&evlist->stats, event->header.type);
1878 
1879         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1880                 return perf_session__process_user_event(session, event, file_offset, file_path);
1881 
1882         if (tool->ordered_events) {
1883                 u64 timestamp = -1ULL;
1884 
1885                 ret = evlist__parse_sample_timestamp(evlist, event, &timestamp);
1886                 if (ret && ret != -1)
1887                         return ret;
1888 
1889                 ret = perf_session__queue_event(session, event, timestamp, file_offset, file_path);
1890                 if (ret != -ETIME)
1891                         return ret;
1892         }
1893 
1894         return perf_session__deliver_event(session, event, tool, file_offset, file_path);
1895 }
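/*
 * Editor's note: a positive return from perf_session__process_event()
 * asks the caller to skip that many extra bytes beyond the event itself;
 * PERF_RECORD_AUXTRACE uses this for its out-of-line payload, which is
 * why perf_session__peek_events() above also advances by
 * event->auxtrace.size.
 */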
1896 
1897 void perf_event_header__bswap(struct perf_event_header *hdr)
1898 {
1899         hdr->type = bswap_32(hdr->type);
1900         hdr->misc = bswap_16(hdr->misc);
1901         hdr->size = bswap_16(hdr->size);
1902 }
1903 
1904 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1905 {
1906         return machine__findnew_thread(&session->machines.host, -1, pid);
1907 }
1908 
1909 int perf_session__register_idle_thread(struct perf_session *session)
1910 {
1911         struct thread *thread = machine__idle_thread(&session->machines.host);
1912 
1913         /* machine__idle_thread() got the thread, so put it */
1914         thread__put(thread);
1915         return thread ? 0 : -1;
1916 }
1917 
1918 static void
1919 perf_session__warn_order(const struct perf_session *session)
1920 {
1921         const struct ordered_events *oe = &session->ordered_events;
1922         struct evsel *evsel;
1923         bool should_warn = true;
1924 
1925         evlist__for_each_entry(session->evlist, evsel) {
1926                 if (evsel->core.attr.write_backward)
1927                         should_warn = false;
1928         }
1929 
1930         if (!should_warn)
1931                 return;
1932         if (oe->nr_unordered_events != 0)
1933                 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1934 }
1935 
1936 static void perf_session__warn_about_errors(const struct perf_session *session)
1937 {
1938         const struct events_stats *stats = &session->evlist->stats;
1939 
1940         if (session->tool->lost == perf_event__process_lost &&
1941             stats->nr_events[PERF_RECORD_LOST] != 0) {
1942                 ui__warning("Processed %d events and lost %d chunks!\n\n"
1943                             "Check IO/CPU overload!\n\n",
1944                             stats->nr_events[0],
1945                             stats->nr_events[PERF_RECORD_LOST]);
1946         }
1947 
1948         if (session->tool->lost_samples == perf_event__process_lost_samples) {
1949                 double drop_rate;
1950 
1951                 drop_rate = (double)stats->total_lost_samples /
1952                             (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1953                 if (drop_rate > 0.05) {
1954                         ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1955                                     stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1956                                     drop_rate * 100.0);
1957                 }
1958         }
1959 
1960         if (session->tool->aux == perf_event__process_aux &&
1961             stats->total_aux_lost != 0) {
1962                 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1963                             stats->total_aux_lost,
1964                             stats->nr_events[PERF_RECORD_AUX]);
1965         }
1966 
1967         if (session->tool->aux == perf_event__process_aux &&
1968             stats->total_aux_partial != 0) {
1969                 bool vmm_exclusive = false;
1970 
1971                 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1972                                        &vmm_exclusive);
1973 
1974                 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1975                             "Are you running a KVM guest in the background?%s\n\n",
1976                             stats->total_aux_partial,
1977                             stats->nr_events[PERF_RECORD_AUX],
1978                             vmm_exclusive ?
1979                             "\nReloading kvm_intel module with vmm_exclusive=0\n"
1980                             "will reduce the gaps to only guest's timeslices." :
1981                             "");
1982         }
1983 
1984         if (session->tool->aux == perf_event__process_aux &&
1985             stats->total_aux_collision != 0) {
1986                 ui__warning("AUX data collision detected %" PRIu64 " times out of %u!\n\n",
1987                             stats->total_aux_collision,
1988                             stats->nr_events[PERF_RECORD_AUX]);
1989         }
1990 
1991         if (stats->nr_unknown_events != 0) {
1992                 ui__warning("Found %u unknown events!\n\n"
1993                             "Is this an older tool processing a perf.data "
1994                             "file generated by a more recent tool?\n\n"
1995                             "If that is not the case, consider "
1996                             "reporting to linux-kernel@vger.kernel.org.\n\n",
1997                             stats->nr_unknown_events);
1998         }
1999 
2000         if (stats->nr_unknown_id != 0) {
2001                 ui__warning("%u samples with id not present in the header\n",
2002                             stats->nr_unknown_id);
2003         }
2004 
2005         if (stats->nr_invalid_chains != 0) {
2006                 ui__warning("Found invalid callchains!\n\n"
2007                             "%u out of %u events were discarded for this reason.\n\n"
2008                             "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
2009                             stats->nr_invalid_chains,
2010                             stats->nr_events[PERF_RECORD_SAMPLE]);
2011         }
2012 
2013         if (stats->nr_unprocessable_samples != 0) {
2014                 ui__warning("%u unprocessable samples recorded.\n"
2015                             "Do you have a KVM guest running and not using 'perf kvm'?\n",
2016                             stats->nr_unprocessable_samples);
2017         }
2018 
2019         perf_session__warn_order(session);
2020 
2021         events_stats__auxtrace_error_warn(stats);
2022 
2023         if (stats->nr_proc_map_timeout != 0) {
2024                 ui__warning("%d map information files for pre-existing threads were\n"
2025                             "not processed; any samples for those addresses will\n"
2026                             "not be resolved. You may find out which threads these\n"
2027                             "are by running with -v and redirecting the output\n"
2028                             "to a file.\n"
2029                             "Is the time limit to process the proc map too short?\n"
2030                             "Increase it with --proc-map-timeout.\n",
2031                             stats->nr_proc_map_timeout);
2032         }
2033 }
2034 
2035 static int perf_session__flush_thread_stack(struct thread *thread,
2036                                             void *p __maybe_unused)
2037 {
2038         return thread_stack__flush(thread);
2039 }
2040 
2041 static int perf_session__flush_thread_stacks(struct perf_session *session)
2042 {
2043         return machines__for_each_thread(&session->machines,
2044                                          perf_session__flush_thread_stack,
2045                                          NULL);
2046 }
2047 
2048 volatile sig_atomic_t session_done;
2049 
2050 static int __perf_session__process_decomp_events(struct perf_session *session);
2051 
2052 static int __perf_session__process_pipe_events(struct perf_session *session)
2053 {
2054         struct ordered_events *oe = &session->ordered_events;
2055         struct perf_tool *tool = session->tool;
2056         struct ui_progress prog;
2057         union perf_event *event;
2058         uint32_t size, cur_size = 0;
2059         void *buf = NULL;
2060         s64 skip = 0;
2061         u64 head;
2062         ssize_t err;
2063         void *p;
2064         bool update_prog = false;
2065 
2066         perf_tool__fill_defaults(tool);
2067 
2068         /*
2069          * If the input is a file that saved pipe data (by redirection), it has
2070          * a file name other than "-".  In that case we can get the total size
2071          * and show the progress.
2072          */
2073         if (strcmp(session->data->path, "-") && session->data->file.size) {
2074                 ui_progress__init_size(&prog, session->data->file.size,
2075                                        "Processing events...");
2076                 update_prog = true;
2077         }
2078 
2079         head = 0;
2080         cur_size = sizeof(union perf_event);
2081 
2082         buf = malloc(cur_size);
2083         if (!buf)
2084                 return -errno;
2085         ordered_events__set_copy_on_queue(oe, true);
2086 more:
2087         event = buf;
2088         err = perf_data__read(session->data, event,
2089                               sizeof(struct perf_event_header));
2090         if (err <= 0) {
2091                 if (err == 0)
2092                         goto done;
2093 
2094                 pr_err("failed to read event header\n");
2095                 goto out_err;
2096         }
2097 
2098         if (session->header.needs_swap)
2099                 perf_event_header__bswap(&event->header);
2100 
2101         size = event->header.size;
2102         if (size < sizeof(struct perf_event_header)) {
2103                 pr_err("bad event header size\n");
2104                 goto out_err;
2105         }
2106 
2107         if (size > cur_size) {
2108                 void *new = realloc(buf, size);
2109                 if (!new) {
2110                         pr_err("failed to allocate memory to read event\n");
2111                         goto out_err;
2112                 }
2113                 buf = new;
2114                 cur_size = size;
2115                 event = buf;
2116         }
2117         p = event;
2118         p += sizeof(struct perf_event_header);
2119 
2120         if (size - sizeof(struct perf_event_header)) {
2121                 err = perf_data__read(session->data, p,
2122                                       size - sizeof(struct perf_event_header));
2123                 if (err <= 0) {
2124                         if (err == 0) {
2125                                 pr_err("unexpected end of event stream\n");
2126                                 goto done;
2127                         }
2128 
2129                         pr_err("failed to read event data\n");
2130                         goto out_err;
2131                 }
2132         }
2133 
2134         if ((skip = perf_session__process_event(session, event, head, "pipe")) < 0) {
2135                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2136                        head, event->header.size, event->header.type);
2137                 err = -EINVAL;
2138                 goto out_err;
2139         }
2140 
2141         head += size;
2142 
2143         if (skip > 0)
2144                 head += skip;
2145 
2146         err = __perf_session__process_decomp_events(session);
2147         if (err)
2148                 goto out_err;
2149 
2150         if (update_prog)
2151                 ui_progress__update(&prog, size);
2152 
2153         if (!session_done())
2154                 goto more;
2155 done:
2156         /* do the final flush for ordered samples */
2157         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2158         if (err)
2159                 goto out_err;
2160         err = auxtrace__flush_events(session, tool);
2161         if (err)
2162                 goto out_err;
2163         err = perf_session__flush_thread_stacks(session);
2164 out_err:
2165         free(buf);
2166         if (update_prog)
2167                 ui_progress__finish();
2168         if (!tool->no_warn)
2169                 perf_session__warn_about_errors(session);
2170         ordered_events__free(&session->ordered_events);
2171         auxtrace__free_events(session);
2172         return err;
2173 }
2174 
2175 static union perf_event *
2176 prefetch_event(char *buf, u64 head, size_t mmap_size,
2177                bool needs_swap, union perf_event *error)
2178 {
2179         union perf_event *event;
2180         u16 event_size;
2181 
2182         /*
2183          * Ensure we have enough space remaining to read
2184          * the size of the event in the headers.
2185          */
2186         if (head + sizeof(event->header) > mmap_size)
2187                 return NULL;
2188 
2189         event = (union perf_event *)(buf + head);
2190         if (needs_swap)
2191                 perf_event_header__bswap(&event->header);
2192 
2193         event_size = event->header.size;
2194         if (head + event_size <= mmap_size)
2195                 return event;
2196 
2197         /* We're not fetching the event so swap back again */
2198         if (needs_swap)
2199                 perf_event_header__bswap(&event->header);
2200 
2201         /* Check if the event fits into the next mmapped buf. */
2202         if (event_size <= mmap_size - head % page_size) {
2203                 /* Remap buf and fetch again. */
2204                 return NULL;
2205         }
2206 
2207         /* Invalid input. Event size should never exceed mmap_size. */
2208         pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
2209                  " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size);
2210 
2211         return error;
2212 }
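/*
 * Editor's note: prefetch_event() has a three-way contract: a valid
 * pointer when the whole event lies inside the mapped buffer, NULL when
 * the caller should remap and retry, and the caller-supplied "error"
 * sentinel when the event could never fit (fuzzed or corrupt input).
 * The two wrappers below differ only in that sentinel.
 */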
2213 
2214 static union perf_event *
2215 fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2216 {
2217         return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
2218 }
2219 
2220 static union perf_event *
2221 fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2222 {
2223         return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
2224 }
2225 
2226 static int __perf_session__process_decomp_events(struct perf_session *session)
2227 {
2228         s64 skip;
2229         u64 size;
2230         struct decomp *decomp = session->active_decomp->decomp_last;
2231 
2232         if (!decomp)
2233                 return 0;
2234 
2235         while (decomp->head < decomp->size && !session_done()) {
2236                 union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
2237                                                              session->header.needs_swap);
2238 
2239                 if (!event)
2240                         break;
2241 
2242                 size = event->header.size;
2243 
2244                 if (size < sizeof(struct perf_event_header) ||
2245                     (skip = perf_session__process_event(session, event, decomp->file_pos,
2246                                                         decomp->file_path)) < 0) {
2247                         pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2248                                 decomp->file_pos + decomp->head, event->header.size, event->header.type);
2249                         return -EINVAL;
2250                 }
2251 
2252                 if (skip)
2253                         size += skip;
2254 
2255                 decomp->head += size;
2256         }
2257 
2258         return 0;
2259 }
2260 
2261 /*
2262  * On 64bit we can mmap the data file in one go. No need for tiny mmap
2263  * slices. On 32bit we use 32MB.
2264  */
2265 #if BITS_PER_LONG == 64
2266 #define MMAP_SIZE ULLONG_MAX
2267 #define NUM_MMAPS 1
2268 #else
2269 #define MMAP_SIZE (32 * 1024 * 1024ULL)
2270 #define NUM_MMAPS 128
2271 #endif
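/*
 * Editor's note: reader__mmap() wraps the slice index with
 * "(rd->mmap_idx + 1) & (ARRAY_SIZE(rd->mmaps) - 1)", so NUM_MMAPS must
 * remain a power of two; both 1 and 128 qualify.
 */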
2272 
2273 struct reader;
2274 
2275 typedef s64 (*reader_cb_t)(struct perf_session *session,
2276                            union perf_event *event,
2277                            u64 file_offset,
2278                            const char *file_path);
2279 
2280 struct reader {
2281         int              fd;
2282         const char       *path;
2283         u64              data_size;
2284         u64              data_offset;
2285         reader_cb_t      process;
2286         bool             in_place_update;
2287         char             *mmaps[NUM_MMAPS];
2288         size_t           mmap_size;
2289         int              mmap_idx;
2290         char             *mmap_cur;
2291         u64              file_pos;
2292         u64              file_offset;
2293         u64              head;
2294         u64              size;
2295         bool             done;
2296         struct zstd_data   zstd_data;
2297         struct decomp_data decomp_data;
2298 };
2299 
2300 static int
2301 reader__init(struct reader *rd, bool *one_mmap)
2302 {
2303         u64 data_size = rd->data_size;
2304         char **mmaps = rd->mmaps;
2305 
2306         rd->head = rd->data_offset;
2307         data_size += rd->data_offset;
2308 
2309         rd->mmap_size = MMAP_SIZE;
2310         if (rd->mmap_size > data_size) {
2311                 rd->mmap_size = data_size;
2312                 if (one_mmap)
2313                         *one_mmap = true;
2314         }
2315 
2316         memset(mmaps, 0, sizeof(rd->mmaps));
2317 
2318         if (zstd_init(&rd->zstd_data, 0))
2319                 return -1;
2320         rd->decomp_data.zstd_decomp = &rd->zstd_data;
2321 
2322         return 0;
2323 }
2324 
2325 static void
2326 reader__release_decomp(struct reader *rd)
2327 {
2328         perf_decomp__release_events(rd->decomp_data.decomp);
2329         zstd_fini(&rd->zstd_data);
2330 }
2331 
2332 static int
2333 reader__mmap(struct reader *rd, struct perf_session *session)
2334 {
2335         int mmap_prot, mmap_flags;
2336         char *buf, **mmaps = rd->mmaps;
2337         u64 page_offset;
2338 
2339         mmap_prot  = PROT_READ;
2340         mmap_flags = MAP_SHARED;
2341 
2342         if (rd->in_place_update) {
2343                 mmap_prot  |= PROT_WRITE;
2344         } else if (session->header.needs_swap) {
2345                 mmap_prot  |= PROT_WRITE;
2346                 mmap_flags = MAP_PRIVATE;
2347         }
2348 
2349         if (mmaps[rd->mmap_idx]) {
2350                 munmap(mmaps[rd->mmap_idx], rd->mmap_size);
2351                 mmaps[rd->mmap_idx] = NULL;
2352         }
2353 
2354         page_offset = page_size * (rd->head / page_size);
2355         rd->file_offset += page_offset;
2356         rd->head -= page_offset;
2357 
2358         buf = mmap(NULL, rd->mmap_size, mmap_prot, mmap_flags, rd->fd,
2359                    rd->file_offset);
2360         if (buf == MAP_FAILED) {
2361                 pr_err("failed to mmap file\n");
2362                 return -errno;
2363         }
2364         mmaps[rd->mmap_idx] = rd->mmap_cur = buf;
2365         rd->mmap_idx = (rd->mmap_idx + 1) & (ARRAY_SIZE(rd->mmaps) - 1);
2366         rd->file_pos = rd->file_offset + rd->head;
2367         if (session->one_mmap) {
2368                 session->one_mmap_addr = buf;
2369                 session->one_mmap_offset = rd->file_offset;
2370         }
2371 
2372         return 0;
2373 }
2374 
2375 enum {
2376         READER_OK,
2377         READER_NODATA,
2378 };
2379 
2380 static int
2381 reader__read_event(struct reader *rd, struct perf_session *session,
2382                    struct ui_progress *prog)
2383 {
2384         u64 size;
2385         int err = READER_OK;
2386         union perf_event *event;
2387         s64 skip;
2388 
2389         event = fetch_mmaped_event(rd->head, rd->mmap_size, rd->mmap_cur,
2390                                    session->header.needs_swap);
2391         if (IS_ERR(event))
2392                 return PTR_ERR(event);
2393 
2394         if (!event)
2395                 return READER_NODATA;
2396 
2397         size = event->header.size;
2398 
2399         skip = -EINVAL;
2400 
2401         if (size < sizeof(struct perf_event_header) ||
2402             (skip = rd->process(session, event, rd->file_pos, rd->path)) < 0) {
2403                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
2404                        rd->file_offset + rd->head, event->header.size,
2405                        event->header.type, strerror(-skip));
2406                 err = skip;
2407                 goto out;
2408         }
2409 
2410         if (skip)
2411                 size += skip;
2412 
2413         rd->size += size;
2414         rd->head += size;
2415         rd->file_pos += size;
2416 
2417         err = __perf_session__process_decomp_events(session);
2418         if (err)
2419                 goto out;
2420 
2421         ui_progress__update(prog, size);
2422 
2423 out:
2424         return err;
2425 }
2426 
2427 static inline bool
2428 reader__eof(struct reader *rd)
2429 {
2430         return (rd->file_pos >= rd->data_size + rd->data_offset);
2431 }
2432 
2433 static int
2434 reader__process_events(struct reader *rd, struct perf_session *session,
2435                        struct ui_progress *prog)
2436 {
2437         int err;
2438 
2439         err = reader__init(rd, &session->one_mmap);
2440         if (err)
2441                 goto out;
2442 
2443         session->active_decomp = &rd->decomp_data;
2444 
2445 remap:
2446         err = reader__mmap(rd, session);
2447         if (err)
2448                 goto out;
2449 
2450 more:
2451         err = reader__read_event(rd, session, prog);
2452         if (err < 0)
2453                 goto out;
2454         else if (err == READER_NODATA)
2455                 goto remap;
2456 
2457         if (session_done())
2458                 goto out;
2459 
2460         if (!reader__eof(rd))
2461                 goto more;
2462 
2463 out:
2464         session->active_decomp = &session->decomp_data;
2465         return err;
2466 }
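/*
 * Editor's note: in reader__process_events() above, READER_NODATA means
 * the next event does not fit in the current mmap window, so the code
 * jumps back to map the next slice and retries; the loop ends only on a
 * negative error, session_done() or end of data.
 */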
2467 
2468 static s64 process_simple(struct perf_session *session,
2469                           union perf_event *event,
2470                           u64 file_offset,
2471                           const char *file_path)
2472 {
2473         return perf_session__process_event(session, event, file_offset, file_path);
2474 }
2475 
2476 static int __perf_session__process_events(struct perf_session *session)
2477 {
2478         struct reader rd = {
2479                 .fd             = perf_data__fd(session->data),
2480                 .path           = session->data->file.path,
2481                 .data_size      = session->header.data_size,
2482                 .data_offset    = session->header.data_offset,
2483                 .process        = process_simple,
2484                 .in_place_update = session->data->in_place_update,
2485         };
2486         struct ordered_events *oe = &session->ordered_events;
2487         struct perf_tool *tool = session->tool;
2488         struct ui_progress prog;
2489         int err;
2490 
2491         perf_tool__fill_defaults(tool);
2492 
2493         if (rd.data_size == 0)
2494                 return -1;
2495 
2496         ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2497 
2498         err = reader__process_events(&rd, session, &prog);
2499         if (err)
2500                 goto out_err;
2501         /* do the final flush for ordered samples */
2502         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2503         if (err)
2504                 goto out_err;
2505         err = auxtrace__flush_events(session, tool);
2506         if (err)
2507                 goto out_err;
2508         err = perf_session__flush_thread_stacks(session);
2509 out_err:
2510         ui_progress__finish();
2511         if (!tool->no_warn)
2512                 perf_session__warn_about_errors(session);
2513         /*
2514          * We may be switching perf.data output; make ordered_events
2515          * reusable.
2516          */
2517         ordered_events__reinit(&session->ordered_events);
2518         auxtrace__free_events(session);
2519         reader__release_decomp(&rd);
2520         session->one_mmap = false;
2521         return err;
2522 }
2523 
2524 /*
2525  * Process 2 MB of data from each reader in sequence,
2526  * because that's the way the ordered-events sorting works
2527  * most efficiently.
2528  */
2529 #define READER_MAX_SIZE (2 * 1024 * 1024)
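/*
 * Editor's note: __perf_session__process_dir_events() below rotates to
 * the next reader once rd[i].size reaches READER_MAX_SIZE, so each data
 * file feeds the ordered-events queue in bounded chunks per round.
 */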
2530 
2531 /*
2532  * This function reads, merges and processes directory data.
2533  * It assumes version 1 of the directory data, where each
2534  * data file holds per-cpu data, already sorted by the kernel.
2535  */
2536 static int __perf_session__process_dir_events(struct perf_session *session)
2537 {
2538         struct perf_data *data = session->data;
2539         struct perf_tool *tool = session->tool;
2540         int i, ret, readers, nr_readers;
2541         struct ui_progress prog;
2542         u64 total_size = perf_data__size(session->data);
2543         struct reader *rd;
2544 
2545         perf_tool__fill_defaults(tool);
2546 
2547         ui_progress__init_size(&prog, total_size, "Processing events...");
2548 
2549         nr_readers = 1;
2550         for (i = 0; i < data->dir.nr; i++) {
2551                 if (data->dir.files[i].size)
2552                         nr_readers++;
2553         }
2554 
2555         rd = zalloc(nr_readers * sizeof(struct reader));
2556         if (!rd)
2557                 return -ENOMEM;
2558 
2559         rd[0] = (struct reader) {
2560                 .fd              = perf_data__fd(session->data),
2561                 .path            = session->data->file.path,
2562                 .data_size       = session->header.data_size,
2563                 .data_offset     = session->header.data_offset,
2564                 .process         = process_simple,
2565                 .in_place_update = session->data->in_place_update,
2566         };
2567         ret = reader__init(&rd[0], NULL);
2568         if (ret)
2569                 goto out_err;
2570         ret = reader__mmap(&rd[0], session);
2571         if (ret)
2572                 goto out_err;
2573         readers = 1;
2574 
2575         for (i = 0; i < data->dir.nr; i++) {
2576                 if (!data->dir.files[i].size)
2577                         continue;
2578                 rd[readers] = (struct reader) {
2579                         .fd              = data->dir.files[i].fd,
2580                         .path            = data->dir.files[i].path,
2581                         .data_size       = data->dir.files[i].size,
2582                         .data_offset     = 0,
2583                         .process         = process_simple,
2584                         .in_place_update = session->data->in_place_update,
2585                 };
2586                 ret = reader__init(&rd[readers], NULL);
2587                 if (ret)
2588                         goto out_err;
2589                 ret = reader__mmap(&rd[readers], session);
2590                 if (ret)
2591                         goto out_err;
2592                 readers++;
2593         }
2594 
2595         i = 0;
2596         while (readers) {
2597                 if (session_done())
2598                         break;
2599 
2600                 if (rd[i].done) {
2601                         i = (i + 1) % nr_readers;
2602                         continue;
2603                 }
2604                 if (reader__eof(&rd[i])) {
2605                         rd[i].done = true;
2606                         readers--;
2607                         continue;
2608                 }
2609 
2610                 session->active_decomp = &rd[i].decomp_data;
2611                 ret = reader__read_event(&rd[i], session, &prog);
2612                 if (ret < 0) {
2613                         goto out_err;
2614                 } else if (ret == READER_NODATA) {
2615                         ret = reader__mmap(&rd[i], session);
2616                         if (ret)
2617                                 goto out_err;
2618                 }
2619 
2620                 if (rd[i].size >= READER_MAX_SIZE) {
2621                         rd[i].size = 0;
2622                         i = (i + 1) % nr_readers;
2623                 }
2624         }
2625 
2626         ret = ordered_events__flush(&session->ordered_events, OE_FLUSH__FINAL);
2627         if (ret)
2628                 goto out_err;
2629 
2630         ret = perf_session__flush_thread_stacks(session);
2631 out_err:
2632         ui_progress__finish();
2633 
2634         if (!tool->no_warn)
2635                 perf_session__warn_about_errors(session);
2636 
2637         /*
2638          * We may be switching perf.data output; make ordered_events
2639          * reusable.
2640          */
2641         ordered_events__reinit(&session->ordered_events);
2642 
2643         session->one_mmap = false;
2644 
2645         session->active_decomp = &session->decomp_data;
2646         for (i = 0; i < nr_readers; i++)
2647                 reader__release_decomp(&rd[i]);
2648         zfree(&rd);
2649 
2650         return ret;
2651 }
2652 
2653 int perf_session__process_events(struct perf_session *session)
2654 {
2655         if (perf_session__register_idle_thread(session) < 0)
2656                 return -ENOMEM;
2657 
2658         if (perf_data__is_pipe(session->data))
2659                 return __perf_session__process_pipe_events(session);
2660 
2661         if (perf_data__is_dir(session->data) && session->data->dir.nr)
2662                 return __perf_session__process_dir_events(session);
2663 
2664         return __perf_session__process_events(session);
2665 }
2666 
2667 bool perf_session__has_traces(struct perf_session *session, const char *msg)
2668 {
2669         struct evsel *evsel;
2670 
2671         evlist__for_each_entry(session->evlist, evsel) {
2672                 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
2673                         return true;
2674         }
2675 
2676         pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2677         return false;
2678 }
2679 
2680 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2681 {
2682         char *bracket;
2683         struct ref_reloc_sym *ref;
2684         struct kmap *kmap;
2685 
2686         ref = zalloc(sizeof(struct ref_reloc_sym));
2687         if (ref == NULL)
2688                 return -ENOMEM;
2689 
2690         ref->name = strdup(symbol_name);
2691         if (ref->name == NULL) {
2692                 free(ref);
2693                 return -ENOMEM;
2694         }
2695 
2696         bracket = strchr(ref->name, ']');
2697         if (bracket)
2698                 *bracket = '\0';
2699 
2700         ref->addr = addr;
2701 
2702         kmap = map__kmap(map);
2703         if (kmap)
2704                 kmap->ref_reloc_sym = ref;
2705 
2706         return 0;
2707 }
2708 
2709 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2710 {
2711         return machines__fprintf_dsos(&session->machines, fp);
2712 }
2713 
2714 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2715                                           bool (skip)(struct dso *dso, int parm), int parm)
2716 {
2717         return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2718 }
2719 
2720 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2721 {
2722         size_t ret;
2723         const char *msg = "";
2724 
2725         if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2726                 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2727 
2728         ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2729 
2730         ret += events_stats__fprintf(&session->evlist->stats, fp);
2731         return ret;
2732 }
2733 
2734 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2735 {
2736         /*
2737          * FIXME: Here we have to actually print all the machines in this
2738          * session, not just the host...
2739          */
2740         return machine__fprintf(&session->machines.host, fp);
2741 }
2742 
2743 void perf_session__dump_kmaps(struct perf_session *session)
2744 {
2745         int save_verbose = verbose;
2746 
2747         fflush(stdout);
2748         fprintf(stderr, "Kernel and module maps:\n");
2749         verbose = 0; /* Suppress verbose to print a summary only */
2750         maps__fprintf(machine__kernel_maps(&session->machines.host), stderr);
2751         verbose = save_verbose;
2752 }
2753 
2754 struct evsel *perf_session__find_first_evtype(struct perf_session *session,
2755                                               unsigned int type)
2756 {
2757         struct evsel *pos;
2758 
2759         evlist__for_each_entry(session->evlist, pos) {
2760                 if (pos->core.attr.type == type)
2761                         return pos;
2762         }
2763         return NULL;
2764 }
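
/*
 * Editor's note: hypothetical helper showing typical use (added by the
 * editor): probe whether the file contains any event of a given type.
 */
static bool example__has_tracepoints(struct perf_session *session)
{
	return perf_session__find_first_evtype(session,
					       PERF_TYPE_TRACEPOINT) != NULL;
}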
2765 
2766 int perf_session__cpu_bitmap(struct perf_session *session,
2767                              const char *cpu_list, unsigned long *cpu_bitmap)
2768 {
2769         int i, err = -1;
2770         struct perf_cpu_map *map;
2771         int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);
2772         struct perf_cpu cpu;
2773 
2774         for (i = 0; i < PERF_TYPE_MAX; ++i) {
2775                 struct evsel *evsel;
2776 
2777                 evsel = perf_session__find_first_evtype(session, i);
2778                 if (!evsel)
2779                         continue;
2780 
2781                 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
2782                         pr_err("File does not contain CPU events. "
2783                                "Remove -C option to proceed.\n");
2784                         return -1;
2785                 }
2786         }
2787 
2788         map = perf_cpu_map__new(cpu_list);
2789         if (map == NULL) {
2790                 pr_err("Invalid cpu_list\n");
2791                 return -1;
2792         }
2793 
2794         perf_cpu_map__for_each_cpu(cpu, i, map) {
2795                 if (cpu.cpu >= nr_cpus) {
2796                         pr_err("Requested CPU %d too large. "
2797                                "Consider raising MAX_NR_CPUS\n", cpu.cpu);
2798                         goto out_delete_map;
2799                 }
2800 
2801                 __set_bit(cpu.cpu, cpu_bitmap);
2802         }
2803 
2804         err = 0;
2805 
2806 out_delete_map:
2807         perf_cpu_map__put(map);
2808         return err;
2809 }
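
/*
 * Editor's note: a hedged caller sketch, not in the original file.
 * DECLARE_BITMAP() and bitmap_zero() come from the tools copy of
 * <linux/bitmap.h>; the "0-3" CPU list is an example value.
 */
static int example__restrict_to_cpus(struct perf_session *session)
{
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);

	bitmap_zero(cpu_bitmap, MAX_NR_CPUS);
	return perf_session__cpu_bitmap(session, "0-3", cpu_bitmap);
}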
2810 
2811 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2812                                 bool full)
2813 {
2814         if (session == NULL || fp == NULL)
2815                 return;
2816 
2817         fprintf(fp, "# ========\n");
2818         perf_header__fprintf_info(session, fp, full);
2819         fprintf(fp, "# ========\n#\n");
2820 }
2821 
2822 static int perf_session__register_guest(struct perf_session *session, pid_t machine_pid)
2823 {
2824         struct machine *machine = machines__findnew(&session->machines, machine_pid);
2825         struct thread *thread;
2826 
2827         if (!machine)
2828                 return -ENOMEM;
2829 
2830         machine->single_address_space = session->machines.host.single_address_space;
2831 
2832         thread = machine__idle_thread(machine);
2833         if (!thread)
2834                 return -ENOMEM;
2835         thread__put(thread);
2836 
2837         machine->kallsyms_filename = perf_data__guest_kallsyms_name(session->data, machine_pid);
2838 
2839         return 0;
2840 }
2841 
2842 static int perf_session__set_guest_cpu(struct perf_session *session, pid_t pid,
2843                                        pid_t tid, int guest_cpu)
2844 {
2845         struct machine *machine = &session->machines.host;
2846         struct thread *thread = machine__findnew_thread(machine, pid, tid);
2847 
2848         if (!thread)
2849                 return -ENOMEM;
2850         thread__set_guest_cpu(thread, guest_cpu);
2851         thread__put(thread);
2852 
2853         return 0;
2854 }
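
/*
 * Editor's note (added comment, no code change): the two helpers above
 * are driven by PERF_RECORD_ID_INDEX processing below. Each new
 * machine_pid seen in an id-index entry registers a guest machine with
 * its own idle thread and guest kallsyms path, and each (pid, tid) pair
 * on the host side is tagged with the vcpu it represents.
 */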
2855 
2856 int perf_event__process_id_index(struct perf_session *session,
2857                                  union perf_event *event)
2858 {
2859         struct evlist *evlist = session->evlist;
2860         struct perf_record_id_index *ie = &event->id_index;
2861         size_t sz = ie->header.size - sizeof(*ie);
2862         size_t i, nr, max_nr;
2863         size_t e1_sz = sizeof(struct id_index_entry);
2864         size_t e2_sz = sizeof(struct id_index_entry_2);
2865         size_t etot_sz = e1_sz + e2_sz;
2866         struct id_index_entry_2 *e2;
2867         pid_t last_pid = 0;
2868 
2869         max_nr = sz / e1_sz;
2870         nr = ie->nr;
2871         if (nr > max_nr) {
2872                 printf("Too big: nr %zu max_nr %zu\n", nr, max_nr);
2873                 return -EINVAL;
2874         }
2875 
2876         if (sz >= nr * etot_sz) {
2877                 max_nr = sz / etot_sz;
2878                 if (nr > max_nr) {
2879                         printf("Too big2: nr %zu max_nr %zu\n", nr, max_nr);
2880                         return -EINVAL;
2881                 }
2882                 e2 = (void *)ie + sizeof(*ie) + nr * e1_sz;
2883         } else {
2884                 e2 = NULL;
2885         }
2886 
2887         if (dump_trace)
2888                 fprintf(stdout, " nr: %zu\n", nr);
2889 
2890         for (i = 0; i < nr; i++, (e2 ? e2++ : 0)) {
2891                 struct id_index_entry *e = &ie->entries[i];
2892                 struct perf_sample_id *sid;
2893                 int ret;
2894 
2895                 if (dump_trace) {
2896                         fprintf(stdout, " ... id: %"PRI_lu64, e->id);
2897                         fprintf(stdout, "  idx: %"PRI_lu64, e->idx);
2898                         fprintf(stdout, "  cpu: %"PRI_ld64, e->cpu);
2899                         fprintf(stdout, "  tid: %"PRI_ld64, e->tid);
2900                         if (e2) {
2901                                 fprintf(stdout, "  machine_pid: %"PRI_ld64, e2->machine_pid);
2902                                 fprintf(stdout, "  vcpu: %"PRI_lu64"\n", e2->vcpu);
2903                         } else {
2904                                 fprintf(stdout, "\n");
2905                         }
2906                 }
2907 
2908                 sid = evlist__id2sid(evlist, e->id);
2909                 if (!sid)
2910                         return -ENOENT;
2911 
2912                 sid->idx = e->idx;
2913                 sid->cpu.cpu = e->cpu;
2914                 sid->tid = e->tid;
2915 
2916                 if (!e2)
2917                         continue;
2918 
2919                 sid->machine_pid = e2->machine_pid;
2920                 sid->vcpu.cpu = e2->vcpu;
2921 
2922                 if (!sid->machine_pid)
2923                         continue;
2924 
2925                 if (sid->machine_pid != last_pid) {
2926                         ret = perf_session__register_guest(session, sid->machine_pid);
2927                         if (ret)
2928                                 return ret;
2929                         last_pid = sid->machine_pid;
2930                         perf_guest = true;
2931                 }
2932 
2933                 ret = perf_session__set_guest_cpu(session, sid->machine_pid, e->tid, e2->vcpu);
2934                 if (ret)
2935                         return ret;
2936         }
2937         return 0;
2938 }
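
/*
 * Editor's note: layout recap derived from the parsing above (added by
 * the editor). A PERF_RECORD_ID_INDEX record body is laid out as:
 *
 *	struct perf_record_id_index ie;        header, nr
 *	struct id_index_entry entries[nr];     id, idx, cpu, tid
 *	struct id_index_entry_2 e2[nr];        machine_pid, vcpu (optional)
 *
 * The second array is present only when the record is big enough, i.e.
 * sz >= nr * (e1_sz + e2_sz), which is exactly how e2 is probed above.
 */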
2939 
2940 int perf_session__dsos_hit_all(struct perf_session *session)
2941 {
2942         struct rb_node *nd;
2943         int err;
2944 
2945         err = machine__hit_all_dsos(&session->machines.host);
2946         if (err)
2947                 return err;
2948 
2949         for (nd = rb_first_cached(&session->machines.guests); nd;
2950              nd = rb_next(nd)) {
2951                 struct machine *pos = rb_entry(nd, struct machine, rb_node);
2952 
2953                 err = machine__hit_all_dsos(pos);
2954                 if (err)
2955                         return err;
2956         }
2957 
2958         return 0;
2959 }
2960 
