/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_RECORD_H
#define __PERF_RECORD_H
/*
 * The linux/stddef.h isn't needed here, but is needed for __always_inline used
 * in files included from uapi/linux/perf_event.h such as
 * /usr/include/linux/swab.h and /usr/include/linux/byteorder/little_endian.h,
 * detected in at least musl libc, used in Alpine Linux.
 */
#include <stdio.h>
#include <linux/stddef.h>
#include <perf/event.h>
#include <linux/types.h>

struct dso;
struct machine;
struct perf_event_attr;
struct perf_sample;

#ifdef __LP64__
/*
 * /usr/include/inttypes.h uses just 'lu' for PRIu64, but we end up defining
 * __u64 as long long unsigned int, and then -Wformat
 * complains of the mismatched types, so use these PRI_l[ux]64 special
 * macros to overcome that.
 */
#define PRI_lu64 "l" PRIu64
#define PRI_lx64 "l" PRIx64
#define PRI_ld64 "l" PRId64
#else
#define PRI_lu64 PRIu64
#define PRI_lx64 PRIx64
#define PRI_ld64 PRId64
#endif

#define PERF_SAMPLE_MASK				\
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID |		\
	 PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR |		\
	 PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID |	\
	 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD |		\
	 PERF_SAMPLE_IDENTIFIER)

/* perf sample has 16 bits size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)
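
/*
 * Editor's illustrative sketch, not part of the original header: the
 * PRI_l[ux]64 helpers above are spliced into printf-style format strings when
 * a __u64 coming from the perf ABI is printed, so that -Wformat stays quiet
 * on __LP64__ systems.  The helper name is hypothetical and assumes
 * <inttypes.h> is visible, as it is in the .c files that use these macros.
 */
static inline int perf_debug__fprintf_id(FILE *fp, __u64 id)
{
	return fprintf(fp, "id: %" PRI_lu64 "\n", id);
}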

struct ip_callchain {
	u64 nr;
	u64 ips[];
};

struct branch_stack;

enum {
	PERF_IP_FLAG_BRANCH		= 1ULL << 0,
	PERF_IP_FLAG_CALL		= 1ULL << 1,
	PERF_IP_FLAG_RETURN		= 1ULL << 2,
	PERF_IP_FLAG_CONDITIONAL	= 1ULL << 3,
	PERF_IP_FLAG_SYSCALLRET		= 1ULL << 4,
	PERF_IP_FLAG_ASYNC		= 1ULL << 5,
	PERF_IP_FLAG_INTERRUPT		= 1ULL << 6,
	PERF_IP_FLAG_TX_ABORT		= 1ULL << 7,
	PERF_IP_FLAG_TRACE_BEGIN	= 1ULL << 8,
	PERF_IP_FLAG_TRACE_END		= 1ULL << 9,
	PERF_IP_FLAG_IN_TX		= 1ULL << 10,
	PERF_IP_FLAG_VMENTRY		= 1ULL << 11,
	PERF_IP_FLAG_VMEXIT		= 1ULL << 12,
	PERF_IP_FLAG_INTR_DISABLE	= 1ULL << 13,
	PERF_IP_FLAG_INTR_TOGGLE	= 1ULL << 14,
};

#define PERF_IP_FLAG_CHARS "bcrosyiABExghDt"

#define PERF_BRANCH_MASK		(\
	PERF_IP_FLAG_BRANCH		|\
	PERF_IP_FLAG_CALL		|\
	PERF_IP_FLAG_RETURN		|\
	PERF_IP_FLAG_CONDITIONAL	|\
	PERF_IP_FLAG_SYSCALLRET		|\
	PERF_IP_FLAG_ASYNC		|\
	PERF_IP_FLAG_INTERRUPT		|\
	PERF_IP_FLAG_TX_ABORT		|\
	PERF_IP_FLAG_TRACE_BEGIN	|\
	PERF_IP_FLAG_TRACE_END		|\
	PERF_IP_FLAG_VMENTRY		|\
	PERF_IP_FLAG_VMEXIT)
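
/*
 * Editor's illustrative sketch, not part of the original header: the flag
 * bits above describe the branch (if any) taken at a sampled instruction
 * pointer, and PERF_BRANCH_MASK selects just the branch-type bits.  A
 * hypothetical helper that checks for a call made inside a transaction:
 */
static inline bool perf_ip_flags__is_tx_call(u64 flags)
{
	return (flags & PERF_BRANCH_MASK) == (PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL) &&
	       (flags & PERF_IP_FLAG_IN_TX);
}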

#define PERF_MEM_DATA_SRC_NONE \
	(PERF_MEM_S(OP, NA)   |\
	 PERF_MEM_S(LVL, NA)  |\
	 PERF_MEM_S(SNOOP, NA) |\
	 PERF_MEM_S(LOCK, NA) |\
	 PERF_MEM_S(TLB, NA)  |\
	 PERF_MEM_S(LVLNUM, NA))

/* Attribute type for custom synthesized events */
#define PERF_TYPE_SYNTH (INT_MAX + 1U)

/* Attribute config for custom synthesized events */
enum perf_synth_id {
	PERF_SYNTH_INTEL_PTWRITE,
	PERF_SYNTH_INTEL_MWAIT,
	PERF_SYNTH_INTEL_PWRE,
	PERF_SYNTH_INTEL_EXSTOP,
	PERF_SYNTH_INTEL_PWRX,
	PERF_SYNTH_INTEL_CBR,
	PERF_SYNTH_INTEL_PSB,
	PERF_SYNTH_INTEL_EVT,
	PERF_SYNTH_INTEL_IFLAG_CHG,
};
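
/*
 * Editor's illustrative sketch, not part of the original header: events
 * synthesized by perf itself carry PERF_TYPE_SYNTH in perf_event_attr.type
 * and one of the perf_synth_id values above in perf_event_attr.config.
 * Hypothetical helpers, assuming <limits.h> (for INT_MAX) and the full
 * struct perf_event_attr definition are in scope, as they are in the .c
 * files that deal with synthesized events.
 */
static inline bool perf_event_attr__is_synth(const struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_SYNTH;
}

static inline enum perf_synth_id perf_event_attr__synth_id(const struct perf_event_attr *attr)
{
	return (enum perf_synth_id)attr->config;
}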

/*
 * Raw data formats for synthesized events. Note that 4 bytes of padding are
 * present to match the 'size' member of PERF_SAMPLE_RAW data which is always
 * 8-byte aligned. That means we must dereference raw_data with an offset of 4.
 * Refer perf_sample__synth_ptr() and perf_synth__raw_data().  It also means the
 * structure sizes are 4 bytes bigger than the raw_size, refer
 * perf_synth__raw_size().
 */

struct perf_synth_intel_ptwrite {
	u32 padding;
	union {
		struct {
			u32	ip		:  1,
				reserved	: 31;
		};
		u32	flags;
	};
	u64	payload;
};

struct perf_synth_intel_mwait {
	u32 padding;
	u32 reserved;
	union {
		struct {
			u64	hints		:  8,
				reserved1	: 24,
				extensions	:  2,
				reserved2	: 30;
		};
		u64	payload;
	};
};

struct perf_synth_intel_pwre {
	u32 padding;
	u32 reserved;
	union {
		struct {
			u64	reserved1	:  7,
				hw		:  1,
				subcstate	:  4,
				cstate		:  4,
				reserved2	: 48;
		};
		u64	payload;
	};
};

struct perf_synth_intel_exstop {
	u32 padding;
	union {
		struct {
			u32	ip		:  1,
				reserved	: 31;
		};
		u32	flags;
	};
};

struct perf_synth_intel_pwrx {
	u32 padding;
	u32 reserved;
	union {
		struct {
			u64	deepest_cstate	:  4,
				last_cstate	:  4,
				wake_reason	:  4,
				reserved1	: 52;
		};
		u64	payload;
	};
};

struct perf_synth_intel_cbr {
	u32 padding;
	union {
		struct {
			u32	cbr		:  8,
				reserved1	:  8,
				max_nonturbo	:  8,
				reserved2	:  8;
		};
		u32	flags;
	};
	u32 freq;
	u32 reserved3;
};

struct perf_synth_intel_psb {
	u32 padding;
	u32 reserved;
	u64 offset;
};

struct perf_synth_intel_evd {
	union {
		struct {
			u8	evd_type;
			u8	reserved[7];
		};
		u64	et;
	};
	u64 payload;
};

/* Intel PT Event Trace */
struct perf_synth_intel_evt {
	u32 padding;
	union {
		struct {
			u32	type		:  5,
				reserved	:  2,
				ip		:  1,
				vector		:  8,
				evd_cnt		: 16;
		};
		u32	cfe;
	};
	struct perf_synth_intel_evd evd[0];
};

struct perf_synth_intel_iflag_chg {
	u32 padding;
	union {
		struct {
			u32	iflag		:  1,
				via_branch	:  1;
		};
		u32	flags;
	};
	u64	branch_ip; /* If via_branch */
};

static inline void *perf_synth__raw_data(void *p)
{
	return p + 4;
}

#define perf_synth__raw_size(d) (sizeof(d) - 4)

#define perf_sample__bad_synth_size(s, d) ((s)->raw_size < sizeof(d) - 4)

enum {
	PERF_STAT_ROUND_TYPE__INTERVAL	= 0,
	PERF_STAT_ROUND_TYPE__FINAL	= 1,
};

void perf_event__print_totals(void);

struct perf_cpu_map;
struct perf_record_stat_config;
struct perf_stat_config;
struct perf_tool;
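
/*
 * Editor's illustrative sketch, not part of the original header: how the
 * helpers above pair with the 4 bytes of padding described earlier when a
 * synthesized record is emitted as PERF_SAMPLE_RAW data.  The function name
 * is hypothetical; 'raw_data' and 'raw_size' stand in for the perf_sample
 * fields of the same name.
 */
static inline void perf_synth_intel_ptwrite__to_raw(struct perf_synth_intel_ptwrite *ptw,
						    void **raw_data, u32 *raw_size)
{
	/* Point past the leading padding member... */
	*raw_data = perf_synth__raw_data(ptw);
	/* ...so the advertised size is 4 bytes smaller than the structure. */
	*raw_size = perf_synth__raw_size(*ptw);
}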

void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct perf_record_stat_config *event);

int perf_event__process_comm(const struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_lost(const struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_lost_samples(const struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine);
int perf_event__process_aux(const struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct machine *machine);
int perf_event__process_itrace_start(const struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine);
int perf_event__process_aux_output_hw_id(const struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct machine *machine);
int perf_event__process_switch(const struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine);
int perf_event__process_namespaces(const struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine);
int perf_event__process_cgroup(const struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine);
int perf_event__process_mmap(const struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_mmap2(const struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct machine *machine);
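
/*
 * Editor's note, not part of the original header: all of the
 * perf_event__process_*() handlers share this (tool, event, sample, machine)
 * shape, so they can be plugged straight into the matching callbacks of a
 * struct perf_tool (see util/tool.h).  Hypothetical usage, assuming a tool
 * instance initialized elsewhere:
 *
 *	tool.comm  = perf_event__process_comm;
 *	tool.mmap  = perf_event__process_mmap;
 *	tool.mmap2 = perf_event__process_mmap2;
 */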

int perf_event__process_fork(const struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_exit(const struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__exit_del_thread(const struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine);
int perf_event__process_ksymbol(const struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine);
int perf_event__process_bpf(const struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct machine *machine);
int perf_event__process_text_poke(const struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct machine *machine);
int perf_event__process(const struct perf_tool *tool,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine);

bool is_bts_event(struct perf_event_attr *attr);
bool sample_addr_correlates_sym(struct perf_event_attr *attr);

const char *perf_event__name(unsigned int id);
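
/*
 * Editor's illustrative sketch, not part of the original header:
 * perf_event__name() maps a PERF_RECORD_* type number to a printable name,
 * which combines naturally with the fprintf helpers declared below.  The
 * wrapper name is hypothetical.
 */
static inline void perf_event__fprintf_name(union perf_event *event, FILE *fp)
{
	fprintf(fp, "%s\n", perf_event__name(event->header.type));
}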

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_aux_output_hw_id(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_cgroup(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_bpf(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_text_poke(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, struct machine *machine, FILE *fp);

int kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name, u64 *addr);
int kallsyms__get_symbol_start(const char *kallsyms_filename,
			       const char *symbol_name, u64 *addr);

void event_attr_init(struct perf_event_attr *attr);

int perf_event_paranoid(void);
bool perf_event_paranoid_check(int max_level);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;
extern unsigned int proc_map_timeout;

#define PAGE_SIZE_NAME_LEN	32
char *get_page_size_name(u64 size, char *str);

void arch_perf_parse_sample_weight(struct perf_sample *data, const __u64 *array, u64 type);
void arch_perf_synthesize_sample_weight(const struct perf_sample *data, __u64 *array, u64 type);
const char *arch_perf_header_entry(const char *se_header);
int arch_support_sort_key(const char *sort_key);

static inline bool perf_event_header__cpumode_is_guest(u8 cpumode)
{
	return cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	       cpumode == PERF_RECORD_MISC_GUEST_USER;
}

static inline bool perf_event_header__misc_is_guest(u16 misc)
{
	return perf_event_header__cpumode_is_guest(misc & PERF_RECORD_MISC_CPUMODE_MASK);
}

static inline bool perf_event_header__is_guest(const struct perf_event_header *header)
{
	return perf_event_header__misc_is_guest(header->misc);
}

static inline bool perf_event__is_guest(const union perf_event *event)
{
	return perf_event_header__is_guest(&event->header);
}

#endif /* __PERF_RECORD_H */