// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/string.h>

#include <sched.h>
#include <perf/mmap.h>
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "record.h"
#include "tests.h"
#include "util/mmap.h"
#include "util/sample.h"
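
/*
 * Find the first CPU in 'pid's affinity mask and reduce the mask to just
 * that CPU, so the workload can be pinned to a single CPU and the 'cpu'
 * field of every sample checked against one expected value.
 */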
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
	int i, cpu = -1, nrcpus = 1024;
realloc:
	CPU_ZERO(maskp);

	if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			nrcpus = nrcpus << 2;
			goto realloc;
		}
		perror("sched_getaffinity");
		return -1;
	}

	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET(i, maskp)) {
			if (cpu == -1)
				cpu = i;
			else
				CPU_CLR(i, maskp);
		}
	}

	return cpu;
}
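
/*
 * Fork a "sleep 1" workload, pin it to the first CPU in its affinity mask,
 * and consume its PERF_RECORD_* stream from the mmap'ed ring buffers,
 * checking that the COMM, MMAP/MMAP2, FORK and EXIT events show up with the
 * expected pid/tid/cpu fields and monotonically increasing timestamps.
 */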
static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_buffering = true,
		.mmap_pages = 256,
	};
	cpu_set_t cpu_mask;
	size_t cpu_mask_size = sizeof(cpu_mask);
	struct evlist *evlist = evlist__new_dummy();
	struct evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname, *mmap_filename;
	u64 prev_time = 0;
	bool found_cmd_mmap = false,
	     found_coreutils_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
	char sbuf[STRERR_BUFSIZE];

	if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
		evlist = evlist__new_default();

	if (evlist == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
	 * for evlist__start_workload() to exec it. This is done this way
	 * so that we have time to open the evlist (calling sys_perf_event_open
	 * on all the fds) and then mmap them.
	 */
	err = evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Configure the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = evlist__first(evlist);
	evsel__set_sample_bit(evsel, CPU);
	evsel__set_sample_bit(evsel, TID);
	evsel__set_sample_bit(evsel, TIME);
	evlist__config(evlist, &opts, NULL);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	cpu = err;

	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = evlist__mmap(evlist, opts.mmap_pages);
	if (err < 0) {
		pr_debug("evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events, they will
	 * count just on workload.pid, which will start...
	 */
	evlist__enable(evlist);

	/*
	 * Now!
	 */
	evlist__start_workload(evlist);

	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->core.nr_mmaps; i++) {
			union perf_event *event;
			struct mmap *md;

			md = &evlist->mmap[i];
			if (perf_mmap__read_init(&md->core) < 0)
				continue;

			while ((event = perf_mmap__read_event(&md->core)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = evlist__parse_sample(evlist, event, &sample);
				if (err < 0) {
					if (verbose > 0)
						perf_event__fprintf(event, NULL, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_delete_evlist;
				}

				if (verbose > 0) {
					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, NULL, stderr);
				}

				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2 ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				    (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2) &&
				     event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					mmap_filename = event->mmap.filename;
					goto check_bname;
				case PERF_RECORD_MMAP2:
					mmap_filename = event->mmap2.filename;
				check_bname:
					bname = strrchr(mmap_filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_coreutils_mmap)
							found_coreutils_mmap = !strcmp(bname + 1, "coreutils");
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(mmap_filename, "[vdso]");
					break;

				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}

				perf_mmap__consume(&md->core);
			}
			perf_mmap__read_done(&md->core);
		}

		/*
		 * We don't use poll here because, at least as of v3.1, the
		 * PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events, just PERF_RECORD_SAMPLE does.
		 */
		if (total_events == before && false)
			evlist__poll(evlist, -1);

		sleep(1);
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

found_exit:
	/*
	 * Expect a single COMM event from exec'ing the workload; tolerate one
	 * extra when "sleep" was mapped from a coreutils multi-call binary.
	 */
	if (nr_events[PERF_RECORD_COMM] > 1 + !!found_coreutils_mmap) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap && !found_coreutils_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_delete_evlist:
	evlist__delete(evlist);
out:
	if (err == -EACCES)
		return TEST_SKIP;
	if (err < 0 || errs != 0)
		return TEST_FAIL;
	return TEST_OK;
}

static struct test_case tests__PERF_RECORD[] = {
	TEST_CASE_REASON("PERF_RECORD_* events & perf_sample fields",
			 PERF_RECORD,
			 "permissions"),
	{ .name = NULL, }
};

struct test_suite suite__PERF_RECORD = {
	.desc = "PERF_RECORD_* events & perf_sample fields",
	.test_cases = tests__PERF_RECORD,
};
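
/*
 * Note: the suite is registered in the test list in
 * tools/perf/tests/builtin-test.c; the "permissions" string passed to
 * TEST_CASE_REASON is the skip reason reported when the test returns
 * TEST_SKIP, e.g. when perf_event_open() fails with EACCES for an
 * unprivileged user.
 */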