TOMOYO Linux Cross Reference
Linux/arch/s390/kernel/perf_pai_crypto.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support - Processor Activity Instrumentation Facility
 *
 *  Copyright IBM Corp. 2022
 *  Author(s): Thomas Richter <tmricht@linux.ibm.com>
 */
#define KMSG_COMPONENT  "pai_crypto"
#define pr_fmt(fmt)     KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <asm/ctlreg.h>
#include <asm/pai.h>
#include <asm/debug.h>

static debug_info_t *cfm_dbg;
static unsigned int paicrypt_cnt;       /* Size of the mapped counter sets */
                                        /* extracted with QPACI instruction */

DEFINE_STATIC_KEY_FALSE(pai_key);

struct pai_userdata {
        u16 num;
        u64 value;
} __packed;

struct paicrypt_map {
        unsigned long *page;            /* Page for CPU to store counters */
        struct pai_userdata *save;      /* Page to store non-zero counters */
        unsigned int active_events;     /* # of PAI crypto users */
        refcount_t refcnt;              /* Reference count mapped buffers */
        struct perf_event *event;       /* Perf event for sampling */
        struct list_head syswide_list;  /* List of system-wide sampling events */
};

struct paicrypt_mapptr {
        struct paicrypt_map *mapptr;
};

static struct paicrypt_root {           /* Anchor to per CPU data */
        refcount_t refcnt;              /* Overall active events */
        struct paicrypt_mapptr __percpu *mapptr;
} paicrypt_root;

/* Free per CPU data when the last event is removed. */
static void paicrypt_root_free(void)
{
        if (refcount_dec_and_test(&paicrypt_root.refcnt)) {
                free_percpu(paicrypt_root.mapptr);
                paicrypt_root.mapptr = NULL;
        }
        debug_sprintf_event(cfm_dbg, 5, "%s root.refcount %d\n", __func__,
                            refcount_read(&paicrypt_root.refcnt));
}

/*
 * On initialization of the first event, also allocate the per-CPU data
 * dynamically. Start with an array of pointers sized for the maximum
 * number of CPUs possible, which might be larger than the number of CPUs
 * currently online.
 */
static int paicrypt_root_alloc(void)
{
        if (!refcount_inc_not_zero(&paicrypt_root.refcnt)) {
                /* The memory is already zeroed. */
                paicrypt_root.mapptr = alloc_percpu(struct paicrypt_mapptr);
                if (!paicrypt_root.mapptr)
                        return -ENOMEM;
                refcount_set(&paicrypt_root.refcnt, 1);
        }
        return 0;
}

/* Release the PMU if the event is the last perf event */
static DEFINE_MUTEX(pai_reserve_mutex);

/* Adjust usage counters and remove allocated memory when all users are
 * gone.
 */
static void paicrypt_event_destroy_cpu(struct perf_event *event, int cpu)
{
        struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr, cpu);
        struct paicrypt_map *cpump = mp->mapptr;

        mutex_lock(&pai_reserve_mutex);
        debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d "
                            "refcnt %u\n", __func__, event->attr.config,
                            event->cpu, cpump->active_events,
                            refcount_read(&cpump->refcnt));
        if (refcount_dec_and_test(&cpump->refcnt)) {
                debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
                                    __func__, (unsigned long)cpump->page,
                                    cpump->save);
                free_page((unsigned long)cpump->page);
                kvfree(cpump->save);
                kfree(cpump);
                mp->mapptr = NULL;
        }
        paicrypt_root_free();
        mutex_unlock(&pai_reserve_mutex);
}

static void paicrypt_event_destroy(struct perf_event *event)
{
        int cpu;

        static_branch_dec(&pai_key);
        free_page(PAI_SAVE_AREA(event));
        if (event->cpu == -1) {
                struct cpumask *mask = PAI_CPU_MASK(event);

                for_each_cpu(cpu, mask)
                        paicrypt_event_destroy_cpu(event, cpu);
                kfree(mask);
        } else {
                paicrypt_event_destroy_cpu(event, event->cpu);
        }
}

static u64 paicrypt_getctr(unsigned long *page, int nr, bool kernel)
{
        if (kernel)
                nr += PAI_CRYPTO_MAXCTR;
        return page[nr];
}
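
/*
 * Editor's sketch of the counter page layout that paicrypt_getctr()
 * indexes, assuming PAI_CRYPTO_MAXCTR from <asm/pai.h> is the number of
 * counter slots per address space:
 *
 *   page[0 .. PAI_CRYPTO_MAXCTR - 1]                      user counters
 *   page[PAI_CRYPTO_MAXCTR .. 2 * PAI_CRYPTO_MAXCTR - 1]  kernel counters
 *
 * Each counter number therefore has two slots, selected by the "kernel"
 * flag. A hypothetical helper summing both address spaces:
 */
#if 0   /* illustration only, not part of the driver */
static u64 example_getctr_both(unsigned long *page, int nr)
{
        return paicrypt_getctr(page, nr, false) +       /* user slot */
               paicrypt_getctr(page, nr, true);         /* kernel slot */
}
#endif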

/* Read the counter values. Return the value from its location in the
 * mapped counter page. For event CRYPTO_ALL sum up all events.
 */
static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
{
        struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
        struct paicrypt_map *cpump = mp->mapptr;
        u64 sum = 0;
        int i;

        if (event->attr.config != PAI_CRYPTO_BASE) {
                return paicrypt_getctr(cpump->page,
                                       event->attr.config - PAI_CRYPTO_BASE,
                                       kernel);
        }

        for (i = 1; i <= paicrypt_cnt; i++) {
                u64 val = paicrypt_getctr(cpump->page, i, kernel);

                if (!val)
                        continue;
                sum += val;
        }
        return sum;
}

static u64 paicrypt_getall(struct perf_event *event)
{
        u64 sum = 0;

        if (!event->attr.exclude_kernel)
                sum += paicrypt_getdata(event, true);
        if (!event->attr.exclude_user)
                sum += paicrypt_getdata(event, false);

        return sum;
}

/* Check concurrent access of counting and sampling for crypto events.
 * This function is called in process context and it is safe to block.
 * When the event initialization function fails, no other callback will
 * be invoked.
 *
 * Allocate the memory for the event.
 */
static struct paicrypt_map *paicrypt_busy(struct perf_event *event, int cpu)
{
        struct paicrypt_map *cpump = NULL;
        struct paicrypt_mapptr *mp;
        int rc;

        mutex_lock(&pai_reserve_mutex);

        /* Allocate root node */
        rc = paicrypt_root_alloc();
        if (rc)
                goto unlock;

        /* Allocate node for this event */
        mp = per_cpu_ptr(paicrypt_root.mapptr, cpu);
        cpump = mp->mapptr;
        if (!cpump) {                   /* Paicrypt_map allocated? */
                cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
                if (!cpump) {
                        rc = -ENOMEM;
                        goto free_root;
                }
                INIT_LIST_HEAD(&cpump->syswide_list);
        }

        /* Allocate memory for counter page and counter extraction.
         * Only the first counting event has to allocate a page.
         */
        if (cpump->page) {
                refcount_inc(&cpump->refcnt);
                goto unlock;
        }

        rc = -ENOMEM;
        cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
        if (!cpump->page)
                goto free_paicrypt_map;
        cpump->save = kvmalloc_array(paicrypt_cnt + 1,
                                     sizeof(struct pai_userdata), GFP_KERNEL);
        if (!cpump->save) {
                free_page((unsigned long)cpump->page);
                cpump->page = NULL;
                goto free_paicrypt_map;
        }

        /* Set mode and reference count */
        rc = 0;
        refcount_set(&cpump->refcnt, 1);
        mp->mapptr = cpump;
        debug_sprintf_event(cfm_dbg, 5, "%s users %d refcnt %u page %#lx "
                            "save %p rc %d\n", __func__, cpump->active_events,
                            refcount_read(&cpump->refcnt),
                            (unsigned long)cpump->page, cpump->save, rc);
        goto unlock;

free_paicrypt_map:
        /* Undo memory allocation */
        kfree(cpump);
        mp->mapptr = NULL;
free_root:
        paicrypt_root_free();
unlock:
        mutex_unlock(&pai_reserve_mutex);
        return rc ? ERR_PTR(rc) : cpump;
}
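
/*
 * Editor's summary of the allocation ladder above (descriptive only):
 * paicrypt_root.refcnt counts events globally and guards the per-CPU
 * pointer array, while cpump->refcnt counts events per CPU and guards
 * that CPU's counter page plus the save buffer used for sampling. Both
 * levels are serialized by pai_reserve_mutex, so the first event on a
 * CPU pays for the allocations and the last one frees them again.
 */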

static int paicrypt_event_init_all(struct perf_event *event)
{
        struct paicrypt_map *cpump;
        struct cpumask *maskptr;
        int cpu, rc = -ENOMEM;

        maskptr = kzalloc(sizeof(*maskptr), GFP_KERNEL);
        if (!maskptr)
                goto out;

        for_each_online_cpu(cpu) {
                cpump = paicrypt_busy(event, cpu);
                if (IS_ERR(cpump)) {
                        for_each_cpu(cpu, maskptr)
                                paicrypt_event_destroy_cpu(event, cpu);
                        kfree(maskptr);
                        rc = PTR_ERR(cpump);
                        goto out;
                }
                cpumask_set_cpu(cpu, maskptr);
        }

        /*
         * On error, all cpumasks are freed and all events have been
         * destroyed. Otherwise save which CPUs' data structures have been
         * allocated and release them in the paicrypt_event_destroy()
         * callback for this event.
         */
        PAI_CPU_MASK(event) = maskptr;
        rc = 0;
out:
        return rc;
}

/* Might be called on a different CPU than the one the event is intended for. */
static int paicrypt_event_init(struct perf_event *event)
{
        struct perf_event_attr *a = &event->attr;
        struct paicrypt_map *cpump;
        int rc = 0;

        /* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
        if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
                return -ENOENT;
        /* PAI crypto event must be in valid range */
        if (a->config < PAI_CRYPTO_BASE ||
            a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
                return -EINVAL;
        /* Allow only CRYPTO_ALL for sampling */
        if (a->sample_period && a->config != PAI_CRYPTO_BASE)
                return -EINVAL;
        /* Get a page to store last counter values for sampling */
        if (a->sample_period) {
                PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
                if (!PAI_SAVE_AREA(event)) {
                        rc = -ENOMEM;
                        goto out;
                }
        }

        if (event->cpu >= 0) {
                cpump = paicrypt_busy(event, event->cpu);
                if (IS_ERR(cpump))
                        rc = PTR_ERR(cpump);
        } else {
                rc = paicrypt_event_init_all(event);
        }
        if (rc) {
                free_page(PAI_SAVE_AREA(event));
                goto out;
        }
        event->destroy = paicrypt_event_destroy;

        if (a->sample_period) {
                a->sample_period = 1;
                a->freq = 0;
                /* Register for paicrypt_sched_task() to be called */
                event->attach_state |= PERF_ATTACH_SCHED_CB;
                /* Add raw data which contains the memory mapped counters */
                a->sample_type |= PERF_SAMPLE_RAW;
                /* Turn off inheritance */
                a->inherit = 0;
        }

        static_branch_inc(&pai_key);
out:
        return rc;
}
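
/*
 * Editor's sketch (user space, illustrative only): opening a counting
 * event against this PMU. Assumptions not taken from this file: the
 * dynamic PMU type is exported in
 * /sys/bus/event_source/devices/pai_crypto/type, and config 0x1000 is
 * PAI_CRYPTO_BASE, i.e. the CRYPTO_ALL event accepted by the range
 * check above. The returned fd is read(2) as a single u64.
 */
#if 0   /* user-space illustration, not kernel code */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_crypto_all(int cpu)
{
        struct perf_event_attr attr;
        FILE *f;
        int type;

        f = fopen("/sys/bus/event_source/devices/pai_crypto/type", "r");
        if (!f)
                return -1;
        if (fscanf(f, "%d", &type) != 1)
                type = -1;
        fclose(f);
        if (type < 0)
                return -1;
        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = type;               /* dynamic PMU type */
        attr.config = 0x1000;           /* CRYPTO_ALL == PAI_CRYPTO_BASE */
        /* pid == -1 with cpu >= 0: count system-wide on one CPU */
        return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}
#endif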

static void paicrypt_read(struct perf_event *event)
{
        u64 prev, new, delta;

        prev = local64_read(&event->hw.prev_count);
        new = paicrypt_getall(event);
        local64_set(&event->hw.prev_count, new);
        delta = (prev <= new) ? new - prev
                              : (-1ULL - prev) + new + 1;        /* overflow */
        local64_add(delta, &event->count);
}
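
/*
 * Editor's worked example for the overflow branch above: with
 * prev = 0xfffffffffffffffe and new = 1 the counter wrapped, so
 * delta = (-1ULL - prev) + new + 1 = 1 + 1 + 1 = 3, i.e. two increments
 * to wrap around through 0xffffffffffffffff to 0, plus one more to
 * reach 1.
 */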

static void paicrypt_start(struct perf_event *event, int flags)
{
        struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
        struct paicrypt_map *cpump = mp->mapptr;
        u64 sum;

        if (!event->attr.sample_period) {       /* Counting */
                sum = paicrypt_getall(event);   /* Get current value */
                local64_set(&event->hw.prev_count, sum);
        } else {                                /* Sampling */
                memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE);
                /* Enable context switch callback for system-wide sampling */
                if (!(event->attach_state & PERF_ATTACH_TASK)) {
                        list_add_tail(PAI_SWLIST(event), &cpump->syswide_list);
                        perf_sched_cb_inc(event->pmu);
                } else {
                        cpump->event = event;
                }
        }
}

static int paicrypt_add(struct perf_event *event, int flags)
{
        struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
        struct paicrypt_map *cpump = mp->mapptr;
        unsigned long ccd;

        if (++cpump->active_events == 1) {
                ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
                WRITE_ONCE(get_lowcore()->ccd, ccd);
                local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
        }
        if (flags & PERF_EF_START)
                paicrypt_start(event, PERF_EF_RELOAD);
        event->hw.state = 0;
        return 0;
}
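
/*
 * Editor's note (descriptive): the first event added on this CPU points
 * the hardware at the counter page by storing its physical address,
 * tagged with PAI_CRYPTO_KERNEL_OFFSET, in the lowcore crypto counter
 * designation (ccd), and enables counting via the CR0 cryptography
 * counter bit. paicrypt_del() reverses both steps when the last event
 * on the CPU is removed.
 */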

static void paicrypt_have_sample(struct perf_event *, struct paicrypt_map *);
static void paicrypt_stop(struct perf_event *event, int flags)
{
        struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
        struct paicrypt_map *cpump = mp->mapptr;

        if (!event->attr.sample_period) {       /* Counting */
                paicrypt_read(event);
        } else {                                /* Sampling */
                if (!(event->attach_state & PERF_ATTACH_TASK)) {
                        perf_sched_cb_dec(event->pmu);
                        list_del(PAI_SWLIST(event));
                } else {
                        paicrypt_have_sample(event, cpump);
                        cpump->event = NULL;
                }
        }
        event->hw.state = PERF_HES_STOPPED;
}

static void paicrypt_del(struct perf_event *event, int flags)
{
        struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
        struct paicrypt_map *cpump = mp->mapptr;

        paicrypt_stop(event, PERF_EF_UPDATE);
        if (--cpump->active_events == 0) {
                local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
                WRITE_ONCE(get_lowcore()->ccd, 0);
        }
}

/* Create raw data and save it in the buffer. Calculate the delta for each
 * counter between this invocation and the last invocation.
 * Returns the number of bytes copied.
 * Saves only entries with a positive counter difference, each of the form
 * 2 bytes: counter number
 * 8 bytes: counter value
 */
static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page,
                            unsigned long *page_old, bool exclude_user,
                            bool exclude_kernel)
{
        int i, outidx = 0;

        for (i = 1; i <= paicrypt_cnt; i++) {
                u64 val = 0, val_old = 0;

                if (!exclude_kernel) {
                        val += paicrypt_getctr(page, i, true);
                        val_old += paicrypt_getctr(page_old, i, true);
                }
                if (!exclude_user) {
                        val += paicrypt_getctr(page, i, false);
                        val_old += paicrypt_getctr(page_old, i, false);
                }
                if (val >= val_old)
                        val -= val_old;
                else
                        val = (~0ULL - val_old) + val + 1;
                if (val) {
                        userdata[outidx].num = i;
                        userdata[outidx].value = val;
                        outidx++;
                }
        }
        return outidx * sizeof(struct pai_userdata);
}
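
/*
 * Editor's illustration of the record stream paicrypt_copy() emits: if
 * only counter 7 (KM_AES_128) moved from 5 to 9 and counter 25
 * (KMC_AES_256) from 0 to 2 since the last snapshot, the save buffer
 * receives the two packed records { .num = 7, .value = 4 } and
 * { .num = 25, .value = 2 }, and the return value is
 * 2 * sizeof(struct pai_userdata) = 2 * 10 = 20 bytes (u16 + u64,
 * __packed).
 */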

static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump,
                                struct perf_event *event)
{
        struct perf_sample_data data;
        struct perf_raw_record raw;
        struct pt_regs regs;
        int overflow;

        /* Setup perf sample */
        memset(&regs, 0, sizeof(regs));
        memset(&raw, 0, sizeof(raw));
        memset(&data, 0, sizeof(data));
        perf_sample_data_init(&data, 0, event->hw.last_period);
        if (event->attr.sample_type & PERF_SAMPLE_TID) {
                data.tid_entry.pid = task_tgid_nr(current);
                data.tid_entry.tid = task_pid_nr(current);
        }
        if (event->attr.sample_type & PERF_SAMPLE_TIME)
                data.time = event->clock();
        if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
                data.id = event->id;
        if (event->attr.sample_type & PERF_SAMPLE_CPU) {
                data.cpu_entry.cpu = smp_processor_id();
                data.cpu_entry.reserved = 0;
        }
        if (event->attr.sample_type & PERF_SAMPLE_RAW) {
                raw.frag.size = rawsize;
                raw.frag.data = cpump->save;
                perf_sample_save_raw_data(&data, &raw);
        }

        overflow = perf_event_overflow(event, &data, &regs);
        perf_event_update_userpage(event);
        /* Save the crypto counter page after reading event data. */
        memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE);
        return overflow;
}
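
/*
 * Editor's sketch (user space, illustrative only): decoding the
 * PERF_SAMPLE_RAW payload pushed above. The record layout mirrors
 * struct pai_userdata at the top of this file; all names and the buffer
 * handling are assumptions for the example.
 */
#if 0   /* user-space illustration, not kernel code */
#include <stdio.h>
#include <stdint.h>

struct pai_userdata_rec {
        uint16_t num;                   /* counter number */
        uint64_t value;                 /* counter delta */
} __attribute__((packed));

static void decode_pai_raw(const void *raw, uint32_t rawsize)
{
        const struct pai_userdata_rec *rec = raw;
        uint32_t i, n = rawsize / sizeof(*rec);

        for (i = 0; i < n; i++)
                printf("counter %u delta %llu\n", rec[i].num,
                       (unsigned long long)rec[i].value);
}
#endif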

/* Check if there is data to be saved on schedule out of a task. */
static void paicrypt_have_sample(struct perf_event *event,
                                 struct paicrypt_map *cpump)
{
        size_t rawsize;

        if (!event)             /* No event active */
                return;
        rawsize = paicrypt_copy(cpump->save, cpump->page,
                                (unsigned long *)PAI_SAVE_AREA(event),
                                event->attr.exclude_user,
                                event->attr.exclude_kernel);
        if (rawsize)            /* Any counters incremented? */
                paicrypt_push_sample(rawsize, cpump, event);
}

/* Check all system-wide sampling events on this CPU for data to be saved
 * on schedule out of a task.
 */
static void paicrypt_have_samples(void)
{
        struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
        struct paicrypt_map *cpump = mp->mapptr;
        struct perf_event *event;

        list_for_each_entry(event, &cpump->syswide_list, hw.tp_list)
                paicrypt_have_sample(event, cpump);
}

/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event CRYPTO_ALL is allowed.
 */
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{
        /* We started with a clean page on event installation. So read out
         * the results on schedule-out and, if the page was dirty, save the
         * old values.
         */
        if (!sched_in)
                paicrypt_have_samples();
}
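
/*
 * Editor's note (descriptive): paicrypt_event_init() sets
 * PERF_ATTACH_SCHED_CB and paicrypt_start() calls perf_sched_cb_inc()
 * for system-wide sampling events, which makes the perf core invoke
 * this callback on context switches. Each schedule-out then flushes the
 * counter deltas accumulated since the last snapshot as one
 * PERF_SAMPLE_RAW record per event.
 */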

/* Attribute definitions for paicrypt interface. As with other CPU
 * Measurement Facilities, there is one attribute per mapped counter.
 * The number of mapped counters may vary per machine generation. Use
 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instruction returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported; there are no
 * holes. A returned number of zero means no support for mapped counters.
 *
 * The identification of the counter is a unique number. The chosen range
 * is 0x1000 + offset in the mapped kernel page.
 * All CPU Measurement Facility counter identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
 * used for the CPU Measurement Sampling facility.
 */
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *paicrypt_format_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group paicrypt_events_group = {
        .name = "events",
        .attrs = NULL                   /* Filled in attr_event_init() */
};

static struct attribute_group paicrypt_format_group = {
        .name = "format",
        .attrs = paicrypt_format_attr,
};

static const struct attribute_group *paicrypt_attr_groups[] = {
        &paicrypt_events_group,
        &paicrypt_format_group,
        NULL,
};
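
/*
 * Editor's note (illustrative): with these groups registered, each
 * counter name shows up under
 * /sys/bus/event_source/devices/pai_crypto/events/ and the format
 * specification under .../format/event. Assuming
 * cpumf_events_sysfs_show() prints the id as "event=0x%04llx", as it
 * does for other s390 CPUMF PMUs, reading the KM_AES_128 attribute
 * would return "event=0x1007": PAI_CRYPTO_BASE (0x1000) plus counter
 * number 7.
 */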

/* Performance monitoring unit for mapped counters */
static struct pmu paicrypt = {
        .task_ctx_nr  = perf_hw_context,
        .event_init   = paicrypt_event_init,
        .add          = paicrypt_add,
        .del          = paicrypt_del,
        .start        = paicrypt_start,
        .stop         = paicrypt_stop,
        .read         = paicrypt_read,
        .sched_task   = paicrypt_sched_task,
        .attr_groups  = paicrypt_attr_groups
};

/* List of symbolic PAI counter names. */
static const char * const paicrypt_ctrnames[] = {
        [0] = "CRYPTO_ALL",
        [1] = "KM_DEA",
        [2] = "KM_TDEA_128",
        [3] = "KM_TDEA_192",
        [4] = "KM_ENCRYPTED_DEA",
        [5] = "KM_ENCRYPTED_TDEA_128",
        [6] = "KM_ENCRYPTED_TDEA_192",
        [7] = "KM_AES_128",
        [8] = "KM_AES_192",
        [9] = "KM_AES_256",
        [10] = "KM_ENCRYPTED_AES_128",
        [11] = "KM_ENCRYPTED_AES_192",
        [12] = "KM_ENCRYPTED_AES_256",
        [13] = "KM_XTS_AES_128",
        [14] = "KM_XTS_AES_256",
        [15] = "KM_XTS_ENCRYPTED_AES_128",
        [16] = "KM_XTS_ENCRYPTED_AES_256",
        [17] = "KMC_DEA",
        [18] = "KMC_TDEA_128",
        [19] = "KMC_TDEA_192",
        [20] = "KMC_ENCRYPTED_DEA",
        [21] = "KMC_ENCRYPTED_TDEA_128",
        [22] = "KMC_ENCRYPTED_TDEA_192",
        [23] = "KMC_AES_128",
        [24] = "KMC_AES_192",
        [25] = "KMC_AES_256",
        [26] = "KMC_ENCRYPTED_AES_128",
        [27] = "KMC_ENCRYPTED_AES_192",
        [28] = "KMC_ENCRYPTED_AES_256",
        [29] = "KMC_PRNG",
        [30] = "KMA_GCM_AES_128",
        [31] = "KMA_GCM_AES_192",
        [32] = "KMA_GCM_AES_256",
        [33] = "KMA_GCM_ENCRYPTED_AES_128",
        [34] = "KMA_GCM_ENCRYPTED_AES_192",
        [35] = "KMA_GCM_ENCRYPTED_AES_256",
        [36] = "KMF_DEA",
        [37] = "KMF_TDEA_128",
        [38] = "KMF_TDEA_192",
        [39] = "KMF_ENCRYPTED_DEA",
        [40] = "KMF_ENCRYPTED_TDEA_128",
        [41] = "KMF_ENCRYPTED_TDEA_192",
        [42] = "KMF_AES_128",
        [43] = "KMF_AES_192",
        [44] = "KMF_AES_256",
        [45] = "KMF_ENCRYPTED_AES_128",
        [46] = "KMF_ENCRYPTED_AES_192",
        [47] = "KMF_ENCRYPTED_AES_256",
        [48] = "KMCTR_DEA",
        [49] = "KMCTR_TDEA_128",
        [50] = "KMCTR_TDEA_192",
        [51] = "KMCTR_ENCRYPTED_DEA",
        [52] = "KMCTR_ENCRYPTED_TDEA_128",
        [53] = "KMCTR_ENCRYPTED_TDEA_192",
        [54] = "KMCTR_AES_128",
        [55] = "KMCTR_AES_192",
        [56] = "KMCTR_AES_256",
        [57] = "KMCTR_ENCRYPTED_AES_128",
        [58] = "KMCTR_ENCRYPTED_AES_192",
        [59] = "KMCTR_ENCRYPTED_AES_256",
        [60] = "KMO_DEA",
        [61] = "KMO_TDEA_128",
        [62] = "KMO_TDEA_192",
        [63] = "KMO_ENCRYPTED_DEA",
        [64] = "KMO_ENCRYPTED_TDEA_128",
        [65] = "KMO_ENCRYPTED_TDEA_192",
        [66] = "KMO_AES_128",
        [67] = "KMO_AES_192",
        [68] = "KMO_AES_256",
        [69] = "KMO_ENCRYPTED_AES_128",
        [70] = "KMO_ENCRYPTED_AES_192",
        [71] = "KMO_ENCRYPTED_AES_256",
        [72] = "KIMD_SHA_1",
        [73] = "KIMD_SHA_256",
        [74] = "KIMD_SHA_512",
        [75] = "KIMD_SHA3_224",
        [76] = "KIMD_SHA3_256",
        [77] = "KIMD_SHA3_384",
        [78] = "KIMD_SHA3_512",
        [79] = "KIMD_SHAKE_128",
        [80] = "KIMD_SHAKE_256",
        [81] = "KIMD_GHASH",
        [82] = "KLMD_SHA_1",
        [83] = "KLMD_SHA_256",
        [84] = "KLMD_SHA_512",
        [85] = "KLMD_SHA3_224",
        [86] = "KLMD_SHA3_256",
        [87] = "KLMD_SHA3_384",
        [88] = "KLMD_SHA3_512",
        [89] = "KLMD_SHAKE_128",
        [90] = "KLMD_SHAKE_256",
        [91] = "KMAC_DEA",
        [92] = "KMAC_TDEA_128",
        [93] = "KMAC_TDEA_192",
        [94] = "KMAC_ENCRYPTED_DEA",
        [95] = "KMAC_ENCRYPTED_TDEA_128",
        [96] = "KMAC_ENCRYPTED_TDEA_192",
        [97] = "KMAC_AES_128",
        [98] = "KMAC_AES_192",
        [99] = "KMAC_AES_256",
        [100] = "KMAC_ENCRYPTED_AES_128",
        [101] = "KMAC_ENCRYPTED_AES_192",
        [102] = "KMAC_ENCRYPTED_AES_256",
        [103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
        [104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
        [105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
        [106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
        [107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
        [108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
        [109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
        [110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
        [111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
        [112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
        [113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
        [114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A",
        [115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
        [116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
        [117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
        [118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
        [119] = "PCC_SCALAR_MULTIPLY_P256",
        [120] = "PCC_SCALAR_MULTIPLY_P384",
        [121] = "PCC_SCALAR_MULTIPLY_P521",
        [122] = "PCC_SCALAR_MULTIPLY_ED25519",
        [123] = "PCC_SCALAR_MULTIPLY_ED448",
        [124] = "PCC_SCALAR_MULTIPLY_X25519",
        [125] = "PCC_SCALAR_MULTIPLY_X448",
        [126] = "PRNO_SHA_512_DRNG",
        [127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
        [128] = "PRNO_TRNG",
        [129] = "KDSA_ECDSA_VERIFY_P256",
        [130] = "KDSA_ECDSA_VERIFY_P384",
        [131] = "KDSA_ECDSA_VERIFY_P521",
        [132] = "KDSA_ECDSA_SIGN_P256",
        [133] = "KDSA_ECDSA_SIGN_P384",
        [134] = "KDSA_ECDSA_SIGN_P521",
        [135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
        [136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
        [137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
        [138] = "KDSA_EDDSA_VERIFY_ED25519",
        [139] = "KDSA_EDDSA_VERIFY_ED448",
        [140] = "KDSA_EDDSA_SIGN_ED25519",
        [141] = "KDSA_EDDSA_SIGN_ED448",
        [142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
        [143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
        [144] = "PCKMO_ENCRYPT_DEA_KEY",
        [145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
        [146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
        [147] = "PCKMO_ENCRYPT_AES_128_KEY",
        [148] = "PCKMO_ENCRYPT_AES_192_KEY",
        [149] = "PCKMO_ENCRYPT_AES_256_KEY",
        [150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
        [151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
        [152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
        [153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
        [154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
        [155] = "IBM_RESERVED_155",
        [156] = "IBM_RESERVED_156",
};

static void __init attr_event_free(struct attribute **attrs, int num)
{
        struct perf_pmu_events_attr *pa;
        int i;

        for (i = 0; i < num; i++) {
                struct device_attribute *dap;

                dap = container_of(attrs[i], struct device_attribute, attr);
                pa = container_of(dap, struct perf_pmu_events_attr, attr);
                kfree(pa);
        }
        kfree(attrs);
}

static int __init attr_event_init_one(struct attribute **attrs, int num)
{
        struct perf_pmu_events_attr *pa;

        /* Index beyond the names array, no counter name available */
        if (num >= ARRAY_SIZE(paicrypt_ctrnames)) {
                attrs[num] = NULL;
                return 0;
        }

        pa = kzalloc(sizeof(*pa), GFP_KERNEL);
        if (!pa)
                return -ENOMEM;

        sysfs_attr_init(&pa->attr.attr);
        pa->id = PAI_CRYPTO_BASE + num;
        pa->attr.attr.name = paicrypt_ctrnames[num];
        pa->attr.attr.mode = 0444;
        pa->attr.show = cpumf_events_sysfs_show;
        pa->attr.store = NULL;
        attrs[num] = &pa->attr.attr;
        return 0;
}

/* Create PMU sysfs event attributes on the fly. */
static int __init attr_event_init(void)
{
        struct attribute **attrs;
        int ret, i;

        attrs = kmalloc_array(paicrypt_cnt + 2, sizeof(*attrs), GFP_KERNEL);
        if (!attrs)
                return -ENOMEM;
        for (i = 0; i <= paicrypt_cnt; i++) {
                ret = attr_event_init_one(attrs, i);
                if (ret) {
                        attr_event_free(attrs, i);
                        return ret;
                }
        }
        attrs[i] = NULL;
        paicrypt_events_group.attrs = attrs;
        return 0;
}
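
/*
 * Editor's note: the array allocated above holds paicrypt_cnt + 1
 * attribute pointers for counters 0 through paicrypt_cnt, plus one NULL
 * terminator, hence the paicrypt_cnt + 2 allocation.
 */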

static int __init paicrypt_init(void)
{
        struct qpaci_info_block ib;
        int rc;

        if (!test_facility(196))
                return 0;

        qpaci(&ib);
        paicrypt_cnt = ib.num_cc;
        if (paicrypt_cnt == 0)
                return 0;
        if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR) {
                pr_err("Too many PMU pai_crypto counters %d\n", paicrypt_cnt);
                return -E2BIG;
        }

        rc = attr_event_init();         /* Export known PAI crypto events */
        if (rc) {
                pr_err("Creation of PMU pai_crypto /sysfs failed\n");
                return rc;
        }

        /* Setup s390dbf facility */
        cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
        if (!cfm_dbg) {
                pr_err("Registration of s390dbf pai_crypto failed\n");
                return -ENOMEM;
        }
        debug_register_view(cfm_dbg, &debug_sprintf_view);

        rc = perf_pmu_register(&paicrypt, "pai_crypto", -1);
        if (rc) {
                pr_err("Registering the pai_crypto PMU failed with rc=%i\n",
                       rc);
                debug_unregister_view(cfm_dbg, &debug_sprintf_view);
                debug_unregister(cfm_dbg);
                return rc;
        }
        return 0;
}

device_initcall(paicrypt_init);
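
/*
 * Editor's usage note (illustrative, not from this file): once the PMU
 * is registered, the counters should be reachable through the perf tool
 * by symbolic name, e.g.
 *
 *   perf stat -e pai_crypto/KM_AES_128/ -a -- sleep 1
 *   perf stat -e pai_crypto/CRYPTO_ALL/ -a -- sleep 1
 *
 * Sampling is restricted to CRYPTO_ALL with an effective period of 1,
 * as enforced in paicrypt_event_init() above.
 */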
