TOMOYO Linux Cross Reference
Linux/kernel/trace/trace_events_user.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, Microsoft Corporation.
 *
 * Authors:
 *   Beau Belgrave <beaub@linux.microsoft.com>
 */

#include <linux/bitmap.h>
#include <linux/cdev.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/ioctl.h>
#include <linux/jhash.h>
#include <linux/refcount.h>
#include <linux/trace_events.h>
#include <linux/tracefs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/user_events.h>
#include "trace_dynevent.h"
#include "trace_output.h"
#include "trace.h"

#define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)

#define FIELD_DEPTH_TYPE 0
#define FIELD_DEPTH_NAME 1
#define FIELD_DEPTH_SIZE 2

/* Limit how long an event name plus args can be within the subsystem. */
#define MAX_EVENT_DESC 512
#define EVENT_NAME(user_event) ((user_event)->reg_name)
#define EVENT_TP_NAME(user_event) ((user_event)->tracepoint.name)
#define MAX_FIELD_ARRAY_SIZE 1024

/*
 * Internal bits (kernel side only) to keep track of connected probes:
 * These are used when status is requested in text form about an event. These
 * bits are compared against an internal byte on the event to determine which
 * probes to print out to the user.
 *
 * These do not reflect the mapped bytes between the user and kernel space.
 */
#define EVENT_STATUS_FTRACE BIT(0)
#define EVENT_STATUS_PERF BIT(1)
#define EVENT_STATUS_OTHER BIT(7)

/*
 * Stores the system name, tables, and locks for a group of events. This
 * allows isolation for events by various means.
 */
struct user_event_group {
        char                    *system_name;
        char                    *system_multi_name;
        struct hlist_node       node;
        struct mutex            reg_mutex;
        DECLARE_HASHTABLE(register_table, 8);
        /* ID that moves forward within the group for multi-event names */
        u64                     multi_id;
};

/* Group for init_user_ns mapping, top-most group */
static struct user_event_group *init_group;

/* Max allowed events for the whole system */
static unsigned int max_user_events = 32768;

/* Current number of events on the whole system */
static unsigned int current_user_events;

/*
 * Stores per-event properties. As users register events within a file,
 * a user_event might be created if it does not already exist. These
 * are globally used and their lifetime is tied to the refcnt member.
 * These cannot go away until the refcnt reaches one.
 */
struct user_event {
        struct user_event_group         *group;
        char                            *reg_name;
        struct tracepoint               tracepoint;
        struct trace_event_call         call;
        struct trace_event_class        class;
        struct dyn_event                devent;
        struct hlist_node               node;
        struct list_head                fields;
        struct list_head                validators;
        struct work_struct              put_work;
        refcount_t                      refcnt;
        int                             min_size;
        int                             reg_flags;
        char                            status;
};

/*
 * Stores per-mm/event properties that enable an address to be
 * updated properly for each task. As tasks are forked, we use
 * these to track enablement sites that are tied to an event.
 */
struct user_event_enabler {
        struct list_head        mm_enablers_link;
        struct user_event       *event;
        unsigned long           addr;

        /* Track enable bit, flags, etc. Aligned for bitops. */
        unsigned long           values;
};

/* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
#define ENABLE_VAL_BIT_MASK 0x3F

/* Bit 6 is for faulting status of enablement */
#define ENABLE_VAL_FAULTING_BIT 6

/* Bit 7 is for freeing status of enablement */
#define ENABLE_VAL_FREEING_BIT 7

/* Bit 8 is for marking 32-bit on 64-bit */
#define ENABLE_VAL_32_ON_64_BIT 8

#define ENABLE_VAL_COMPAT_MASK (1 << ENABLE_VAL_32_ON_64_BIT)

/* Only duplicate the bit and compat values */
#define ENABLE_VAL_DUP_MASK (ENABLE_VAL_BIT_MASK | ENABLE_VAL_COMPAT_MASK)

#define ENABLE_BITOPS(e) (&(e)->values)

#define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))

#define EVENT_MULTI_FORMAT(f) ((f) & USER_EVENT_REG_MULTI_FORMAT)

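/*
 * Editor's sketch (not part of the kernel file): a standalone userspace
 * program showing how an enabler's 'values' word packs the target bit
 * index plus the flag bits defined above. The SK_* names are local
 * stand-ins so the example compiles on its own.
 */
#if 0 /* example only, never built into the kernel */
#include <stdio.h>

#define SK_BIT_MASK     0x3F    /* bits 0-5: which user bit to flip */
#define SK_FAULTING_BIT 6       /* a fault fixup is in flight */
#define SK_FREEING_BIT  7       /* enabler is being torn down */
#define SK_32_ON_64_BIT 8       /* 32-bit word registered on 64-bit kernel */

int main(void)
{
        /* Enabler for user bit 5 on a 32-bit word, no pending fault/free. */
        unsigned long values = (1UL << SK_32_ON_64_BIT) | 5;

        printf("update bit: %d\n", (int)(values & SK_BIT_MASK));
        printf("faulting:   %lu\n", (values >> SK_FAULTING_BIT) & 1);
        printf("freeing:    %lu\n", (values >> SK_FREEING_BIT) & 1);
        printf("32-on-64:   %lu\n", (values >> SK_32_ON_64_BIT) & 1);
        return 0;
}
#endif
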
/* Used for asynchronous faulting in of pages */
struct user_event_enabler_fault {
        struct work_struct              work;
        struct user_event_mm            *mm;
        struct user_event_enabler       *enabler;
        int                             attempt;
};

static struct kmem_cache *fault_cache;

/* Global list of memory descriptors using user_events */
static LIST_HEAD(user_event_mms);
static DEFINE_SPINLOCK(user_event_mms_lock);

/*
 * Stores per-file events references. As users register events within a
 * file, this structure is modified and freed via RCU.
 * The lifetime of this struct is tied to the lifetime of the file.
 * These are not shared and only accessible by the file that created it.
 */
struct user_event_refs {
        struct rcu_head         rcu;
        int                     count;
        struct user_event       *events[];
};

struct user_event_file_info {
        struct user_event_group *group;
        struct user_event_refs  *refs;
};

#define VALIDATOR_ENSURE_NULL (1 << 0)
#define VALIDATOR_REL (1 << 1)

struct user_event_validator {
        struct list_head        user_event_link;
        int                     offset;
        int                     flags;
};

static inline void align_addr_bit(unsigned long *addr, int *bit,
                                  unsigned long *flags)
{
        if (IS_ALIGNED(*addr, sizeof(long))) {
#ifdef __BIG_ENDIAN
                /* 32 bit on BE 64 bit requires a 32 bit offset when aligned. */
                if (test_bit(ENABLE_VAL_32_ON_64_BIT, flags))
                        *bit += 32;
#endif
                return;
        }

        *addr = ALIGN_DOWN(*addr, sizeof(long));

        /*
         * We only support 32 and 64 bit values. The only time we need
         * to align is a 32 bit value on a 64 bit kernel, which on LE
         * is always 32 bits, and on BE requires no change when unaligned.
         */
#ifdef __LITTLE_ENDIAN
        *bit += 32;
#endif
}
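
/*
 * Editor's sketch: a userspace replica of the little-endian 64-bit case
 * above (assumes an LP64 build, so sizeof(long) == 8). A 4-byte enable
 * word at 0x1004 is not long-aligned, so the address rounds down to
 * 0x1000 and the bit shifts up by 32, landing on the same byte the user
 * registered.
 */
#if 0 /* example only */
#include <assert.h>

static void sketch_align(unsigned long *addr, int *bit)
{
        if ((*addr % sizeof(long)) == 0)
                return;                         /* already long-aligned */

        *addr &= ~(sizeof(long) - 1);           /* ALIGN_DOWN to a long */
        *bit += 32;                             /* LE: value is in high half */
}

int main(void)
{
        unsigned long addr = 0x1004;
        int bit = 1;

        sketch_align(&addr, &bit);
        assert(addr == 0x1000 && bit == 33);
        return 0;
}
#endif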

typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
                                   void *tpdata, bool *faulted);

static int user_event_parse(struct user_event_group *group, char *name,
                            char *args, char *flags,
                            struct user_event **newuser, int reg_flags);

static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm);
static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
static void user_event_mm_put(struct user_event_mm *mm);
static int destroy_user_event(struct user_event *user);
static bool user_fields_match(struct user_event *user, int argc,
                              const char **argv);

static u32 user_event_key(char *name)
{
        return jhash(name, strlen(name), 0);
}

static bool user_event_capable(u16 reg_flags)
{
        /* Persistent events require CAP_PERFMON / CAP_SYS_ADMIN */
        if (reg_flags & USER_EVENT_REG_PERSIST) {
                if (!perfmon_capable())
                        return false;
        }

        return true;
}

static struct user_event *user_event_get(struct user_event *user)
{
        refcount_inc(&user->refcnt);

        return user;
}

static void delayed_destroy_user_event(struct work_struct *work)
{
        struct user_event *user = container_of(
                work, struct user_event, put_work);

        mutex_lock(&event_mutex);

        if (!refcount_dec_and_test(&user->refcnt))
                goto out;

        if (destroy_user_event(user)) {
                /*
                 * The only reason this would fail here is if we cannot
                 * update the visibility of the event. In this case the
                 * event stays in the hashtable, waiting for someone to
                 * attempt to delete it later.
                 */
                pr_warn("user_events: Unable to delete event\n");
                refcount_set(&user->refcnt, 1);
        }
out:
        mutex_unlock(&event_mutex);
}

static void user_event_put(struct user_event *user, bool locked)
{
        bool delete;

        if (unlikely(!user))
                return;

        /*
         * When the event is not enabled for auto-delete there will always
         * be at least 1 reference to the event. During event creation we
         * initially set the refcnt to 2 to achieve this. In those cases the
         * caller must acquire event_mutex and after decrement check if the
         * refcnt is 1, meaning this is the last reference. When auto-delete
         * is enabled, there will only be 1 ref, i.e. the refcnt is set to 1
         * during creation so the checks below go through upon the last put.
         * The last put must always be done with the event mutex held.
         */
        if (!locked) {
                lockdep_assert_not_held(&event_mutex);
                delete = refcount_dec_and_mutex_lock(&user->refcnt, &event_mutex);
        } else {
                lockdep_assert_held(&event_mutex);
                delete = refcount_dec_and_test(&user->refcnt);
        }

        if (!delete)
                return;

        /*
         * We now have the event_mutex in all cases, which ensures that
         * no new references will be taken until event_mutex is released.
         * New references come through find_user_event(), which requires
         * the event_mutex to be held.
         */

        if (user->reg_flags & USER_EVENT_REG_PERSIST) {
                /* We should not get here when persist flag is set */
                pr_alert("BUG: Auto-delete engaged on persistent event\n");
                goto out;
        }

        /*
         * Unfortunately we have to attempt the actual destroy in a work
         * queue. This is because not all cases handle a trace_event_call
         * being removed within the class->reg() operation for unregister.
         */
        INIT_WORK(&user->put_work, delayed_destroy_user_event);

        /*
         * Since the event is still in the hashtable, we have to re-inc
         * the ref count to 1. This count will be decremented and checked
         * in the work queue to ensure it's still the last ref. This is
         * needed because a user-process could register the same event in
         * between the time of event_mutex release and the work queue
         * running the delayed destroy. If we removed the item now from
         * the hashtable, this would result in a timing window where a
         * user process would fail a register because the trace_event_call
         * register would fail in the tracing layers.
         */
        refcount_set(&user->refcnt, 1);

        if (WARN_ON_ONCE(!schedule_work(&user->put_work))) {
                /*
                 * If we fail we must wait for an admin to attempt delete or
                 * another register/close of the event, whichever is first.
                 */
                pr_warn("user_events: Unable to queue delayed destroy\n");
        }
out:
        /* If we didn't hold event_mutex on entry, unlock it here */
        if (!locked)
                mutex_unlock(&event_mutex);
}

static void user_event_group_destroy(struct user_event_group *group)
{
        kfree(group->system_name);
        kfree(group->system_multi_name);
        kfree(group);
}

static char *user_event_group_system_name(void)
{
        char *system_name;
        int len = sizeof(USER_EVENTS_SYSTEM) + 1;

        system_name = kmalloc(len, GFP_KERNEL);

        if (!system_name)
                return NULL;

        snprintf(system_name, len, "%s", USER_EVENTS_SYSTEM);

        return system_name;
}

static char *user_event_group_system_multi_name(void)
{
        return kstrdup(USER_EVENTS_MULTI_SYSTEM, GFP_KERNEL);
}

static struct user_event_group *current_user_event_group(void)
{
        return init_group;
}

static struct user_event_group *user_event_group_create(void)
{
        struct user_event_group *group;

        group = kzalloc(sizeof(*group), GFP_KERNEL);

        if (!group)
                return NULL;

        group->system_name = user_event_group_system_name();

        if (!group->system_name)
                goto error;

        group->system_multi_name = user_event_group_system_multi_name();

        if (!group->system_multi_name)
                goto error;

        mutex_init(&group->reg_mutex);
        hash_init(group->register_table);

        return group;
error:
        if (group)
                user_event_group_destroy(group);

        return NULL;
}

static void user_event_enabler_destroy(struct user_event_enabler *enabler,
                                       bool locked)
{
        list_del_rcu(&enabler->mm_enablers_link);

        /* No longer tracking the event via the enabler */
        user_event_put(enabler->event, locked);

        kfree(enabler);
}

static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr,
                                  int attempt)
{
        bool unlocked;
        int ret;

        /*
         * Normally this is low; ensure that it cannot be taken advantage of
         * by bad user processes to cause excessive looping.
         */
        if (attempt > 10)
                return -EFAULT;

        mmap_read_lock(mm->mm);

        /* Ensure MM has tasks, cannot use after exit_mm() */
        if (refcount_read(&mm->tasks) == 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = fixup_user_fault(mm->mm, uaddr, FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
                               &unlocked);
out:
        mmap_read_unlock(mm->mm);

        return ret;
}

static int user_event_enabler_write(struct user_event_mm *mm,
                                    struct user_event_enabler *enabler,
                                    bool fixup_fault, int *attempt);

static void user_event_enabler_fault_fixup(struct work_struct *work)
{
        struct user_event_enabler_fault *fault = container_of(
                work, struct user_event_enabler_fault, work);
        struct user_event_enabler *enabler = fault->enabler;
        struct user_event_mm *mm = fault->mm;
        unsigned long uaddr = enabler->addr;
        int attempt = fault->attempt;
        int ret;

        ret = user_event_mm_fault_in(mm, uaddr, attempt);

        if (ret && ret != -ENOENT) {
                struct user_event *user = enabler->event;

                pr_warn("user_events: Fault for mm: 0x%pK @ 0x%llx event: %s\n",
                        mm->mm, (unsigned long long)uaddr, EVENT_NAME(user));
        }

        /* Prevent state changes from racing */
        mutex_lock(&event_mutex);

        /* User asked for enabler to be removed during fault */
        if (test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))) {
                user_event_enabler_destroy(enabler, true);
                goto out;
        }

        /*
         * If we managed to get the page, re-issue the write. We do not
         * want to get into a possible infinite loop, which is why we only
         * attempt again directly if the page came in. If we couldn't get
         * the page here, then we will try again the next time the event is
         * enabled/disabled.
         */
        clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

        if (!ret) {
                mmap_read_lock(mm->mm);
                user_event_enabler_write(mm, enabler, true, &attempt);
                mmap_read_unlock(mm->mm);
        }
out:
        mutex_unlock(&event_mutex);

        /* In all cases we no longer need the mm or fault */
        user_event_mm_put(mm);
        kmem_cache_free(fault_cache, fault);
}

static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
                                           struct user_event_enabler *enabler,
                                           int attempt)
{
        struct user_event_enabler_fault *fault;

        fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT | __GFP_NOWARN);

        if (!fault)
                return false;

        INIT_WORK(&fault->work, user_event_enabler_fault_fixup);
        fault->mm = user_event_mm_get(mm);
        fault->enabler = enabler;
        fault->attempt = attempt;

        /* Don't try to queue in again while we have a pending fault */
        set_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

        if (!schedule_work(&fault->work)) {
                /* Allow another attempt later */
                clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

                user_event_mm_put(mm);
                kmem_cache_free(fault_cache, fault);

                return false;
        }

        return true;
}

static int user_event_enabler_write(struct user_event_mm *mm,
                                    struct user_event_enabler *enabler,
                                    bool fixup_fault, int *attempt)
{
        unsigned long uaddr = enabler->addr;
        unsigned long *ptr;
        struct page *page;
        void *kaddr;
        int bit = ENABLE_BIT(enabler);
        int ret;

        lockdep_assert_held(&event_mutex);
        mmap_assert_locked(mm->mm);

        *attempt += 1;

        /* Ensure MM has tasks, cannot use after exit_mm() */
        if (refcount_read(&mm->tasks) == 0)
                return -ENOENT;

        if (unlikely(test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)) ||
                     test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))))
                return -EBUSY;

        align_addr_bit(&uaddr, &bit, ENABLE_BITOPS(enabler));

        ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT,
                                    &page, NULL);

        if (unlikely(ret <= 0)) {
                if (!fixup_fault)
                        return -EFAULT;

                if (!user_event_enabler_queue_fault(mm, enabler, *attempt))
                        pr_warn("user_events: Unable to queue fault handler\n");

                return -EFAULT;
        }

        kaddr = kmap_local_page(page);
        ptr = kaddr + (uaddr & ~PAGE_MASK);

        /* Update bit atomically, user tracers must be atomic as well */
        if (enabler->event && enabler->event->status)
                set_bit(bit, ptr);
        else
                clear_bit(bit, ptr);

        kunmap_local(kaddr);
        unpin_user_pages_dirty_lock(&page, 1, true);

        return 0;
}

static bool user_event_enabler_exists(struct user_event_mm *mm,
                                      unsigned long uaddr, unsigned char bit)
{
        struct user_event_enabler *enabler;

        list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
                if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit)
                        return true;
        }

        return false;
}

static void user_event_enabler_update(struct user_event *user)
{
        struct user_event_enabler *enabler;
        struct user_event_mm *next;
        struct user_event_mm *mm;
        int attempt;

        lockdep_assert_held(&event_mutex);

        /*
         * We need to build a one-shot list of all the mms that have an
         * enabler for the user_event passed in. This list is only valid
         * while holding the event_mutex. The only reason for this is due
         * to the global mm list being RCU protected and we use methods
         * which can wait (mmap_read_lock and pin_user_pages_remote).
         *
         * NOTE: user_event_mm_get_all() increments the ref count of each
         * mm that is added to the list to prevent removal timing windows.
         * We must always put each mm after they are used, which may wait.
         */
        mm = user_event_mm_get_all(user);

        while (mm) {
                next = mm->next;
                mmap_read_lock(mm->mm);

                list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
                        if (enabler->event == user) {
                                attempt = 0;
                                user_event_enabler_write(mm, enabler, true, &attempt);
                        }
                }

                mmap_read_unlock(mm->mm);
                user_event_mm_put(mm);
                mm = next;
        }
}

static bool user_event_enabler_dup(struct user_event_enabler *orig,
                                   struct user_event_mm *mm)
{
        struct user_event_enabler *enabler;

        /* Skip pending frees */
        if (unlikely(test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(orig))))
                return true;

        enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT | __GFP_ACCOUNT);

        if (!enabler)
                return false;

        enabler->event = user_event_get(orig->event);
        enabler->addr = orig->addr;

        /* Only dup part of value (ignore future flags, etc) */
        enabler->values = orig->values & ENABLE_VAL_DUP_MASK;

        /* Enablers not exposed yet, RCU not required */
        list_add(&enabler->mm_enablers_link, &mm->enablers);

        return true;
}

static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm)
{
        refcount_inc(&mm->refcnt);

        return mm;
}

static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
{
        struct user_event_mm *found = NULL;
        struct user_event_enabler *enabler;
        struct user_event_mm *mm;

        /*
         * We use the mm->next field to build a one-shot list from the global
         * RCU protected list. To build this list the event_mutex must be held.
         * This lets us build a list without requiring allocs that could fail
         * when user based events are most wanted for diagnostics.
         */
        lockdep_assert_held(&event_mutex);

        /*
         * We do not want to block fork/exec while enablements are being
         * updated, so we use RCU to walk the current tasks that have used
         * user_events ABI for 1 or more events. Each enabler found in each
         * task that matches the event being updated has a write to reflect
         * the kernel state back into the process. Waits/faults must not occur
         * during this. So we scan the list under RCU for all the mm that have
         * the event within it. This is needed because mmap_read_lock() can wait.
         * Each user mm returned has a ref inc to handle remove RCU races.
         */
        rcu_read_lock();

        list_for_each_entry_rcu(mm, &user_event_mms, mms_link) {
                list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) {
                        if (enabler->event == user) {
                                mm->next = found;
                                found = user_event_mm_get(mm);
                                break;
                        }
                }
        }

        rcu_read_unlock();

        return found;
}

static struct user_event_mm *user_event_mm_alloc(struct task_struct *t)
{
        struct user_event_mm *user_mm;

        user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);

        if (!user_mm)
                return NULL;

        user_mm->mm = t->mm;
        INIT_LIST_HEAD(&user_mm->enablers);
        refcount_set(&user_mm->refcnt, 1);
        refcount_set(&user_mm->tasks, 1);

        /*
         * The lifetime of the memory descriptor can slightly outlast
         * the task lifetime if a ref to the user_event_mm is taken
         * between list_del_rcu() and call_rcu(). Therefore we need
         * to take a reference to it to ensure it can live this long
         * under this corner case. This can also occur in clones that
         * outlast the parent.
         */
        mmgrab(user_mm->mm);

        return user_mm;
}

static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_struct *t)
{
        unsigned long flags;

        spin_lock_irqsave(&user_event_mms_lock, flags);
        list_add_rcu(&user_mm->mms_link, &user_event_mms);
        spin_unlock_irqrestore(&user_event_mms_lock, flags);

        t->user_event_mm = user_mm;
}

static struct user_event_mm *current_user_event_mm(void)
{
        struct user_event_mm *user_mm = current->user_event_mm;

        if (user_mm)
                goto inc;

        user_mm = user_event_mm_alloc(current);

        if (!user_mm)
                goto error;

        user_event_mm_attach(user_mm, current);
inc:
        refcount_inc(&user_mm->refcnt);
error:
        return user_mm;
}

static void user_event_mm_destroy(struct user_event_mm *mm)
{
        struct user_event_enabler *enabler, *next;

        list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link)
                user_event_enabler_destroy(enabler, false);

        mmdrop(mm->mm);
        kfree(mm);
}

static void user_event_mm_put(struct user_event_mm *mm)
{
        if (mm && refcount_dec_and_test(&mm->refcnt))
                user_event_mm_destroy(mm);
}

static void delayed_user_event_mm_put(struct work_struct *work)
{
        struct user_event_mm *mm;

        mm = container_of(to_rcu_work(work), struct user_event_mm, put_rwork);
        user_event_mm_put(mm);
}

void user_event_mm_remove(struct task_struct *t)
{
        struct user_event_mm *mm;
        unsigned long flags;

        might_sleep();

        mm = t->user_event_mm;
        t->user_event_mm = NULL;

        /* Clone will increment the tasks, only remove if last clone */
        if (!refcount_dec_and_test(&mm->tasks))
                return;

        /* Remove the mm from the list, so it can no longer be enabled */
        spin_lock_irqsave(&user_event_mms_lock, flags);
        list_del_rcu(&mm->mms_link);
        spin_unlock_irqrestore(&user_event_mms_lock, flags);

        /*
         * We need to wait for currently occurring writes to stop within
         * the mm. This is required since exit_mm() snaps the current rss
         * stats and clears them. On the final mmdrop(), check_mm() will
         * report a bug if these increment.
         *
         * All writes/pins are done under mmap_read lock, take the write
         * lock to ensure in-progress faults have completed. Faults that
         * are pending but yet to run will check the task count and skip
         * the fault since the mm is going away.
         */
        mmap_write_lock(mm->mm);
        mmap_write_unlock(mm->mm);

        /*
         * Put for mm must be done after RCU delay to handle new refs in
         * between the list_del_rcu() and now. This ensures any get refs
         * during rcu_read_lock() are accounted for during list removal.
         *
         * CPU A                        |       CPU B
         * ---------------------------------------------------------------
         * user_event_mm_remove()       |       rcu_read_lock();
         * list_del_rcu()               |       list_for_each_entry_rcu();
         * call_rcu()                   |       refcount_inc();
         * .                            |       rcu_read_unlock();
         * schedule_work()              |       .
         * user_event_mm_put()          |       .
         *
         * mmdrop() cannot be called in the softirq context of call_rcu()
         * so we use a work queue after call_rcu() to run within.
         */
        INIT_RCU_WORK(&mm->put_rwork, delayed_user_event_mm_put);
        queue_rcu_work(system_wq, &mm->put_rwork);
}

void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
{
        struct user_event_mm *mm = user_event_mm_alloc(t);
        struct user_event_enabler *enabler;

        if (!mm)
                return;

        rcu_read_lock();

        list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) {
                if (!user_event_enabler_dup(enabler, mm))
                        goto error;
        }

        rcu_read_unlock();

        user_event_mm_attach(mm, t);
        return;
error:
        rcu_read_unlock();
        user_event_mm_destroy(mm);
}

static bool current_user_event_enabler_exists(unsigned long uaddr,
                                              unsigned char bit)
{
        struct user_event_mm *user_mm = current_user_event_mm();
        bool exists;

        if (!user_mm)
                return false;

        exists = user_event_enabler_exists(user_mm, uaddr, bit);

        user_event_mm_put(user_mm);

        return exists;
}

static struct user_event_enabler
*user_event_enabler_create(struct user_reg *reg, struct user_event *user,
                           int *write_result)
{
        struct user_event_enabler *enabler;
        struct user_event_mm *user_mm;
        unsigned long uaddr = (unsigned long)reg->enable_addr;
        int attempt = 0;

        user_mm = current_user_event_mm();

        if (!user_mm)
                return NULL;

        enabler = kzalloc(sizeof(*enabler), GFP_KERNEL_ACCOUNT);

        if (!enabler)
                goto out;

        enabler->event = user;
        enabler->addr = uaddr;
        enabler->values = reg->enable_bit;

#if BITS_PER_LONG >= 64
        if (reg->enable_size == 4)
                set_bit(ENABLE_VAL_32_ON_64_BIT, ENABLE_BITOPS(enabler));
#endif

retry:
        /* Prevents state changes from racing with new enablers */
        mutex_lock(&event_mutex);

        /* Attempt to reflect the current state within the process */
        mmap_read_lock(user_mm->mm);
        *write_result = user_event_enabler_write(user_mm, enabler, false,
                                                 &attempt);
        mmap_read_unlock(user_mm->mm);

        /*
         * If the write works, then we will track the enabler. A ref to the
         * underlying user_event is held by the enabler to prevent it going
         * away while the enabler is still in use by a process. The ref is
         * removed when the enabler is destroyed. This means an event cannot
         * be forcefully deleted from the system until all tasks using it
         * exit or run exec(), which includes forks and clones.
         */
        if (!*write_result) {
                user_event_get(user);
                list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers);
        }

        mutex_unlock(&event_mutex);

        if (*write_result) {
                /* Attempt to fault-in and retry if it worked */
                if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
                        goto retry;

                kfree(enabler);
                enabler = NULL;
        }
out:
        user_event_mm_put(user_mm);

        return enabler;
}

static __always_inline __must_check
bool user_event_last_ref(struct user_event *user)
{
        int last = 0;

        if (user->reg_flags & USER_EVENT_REG_PERSIST)
                last = 1;

        return refcount_read(&user->refcnt) == last;
}

static __always_inline __must_check
size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
{
        size_t ret;

        pagefault_disable();

        ret = copy_from_iter_nocache(addr, bytes, i);

        pagefault_enable();

        return ret;
}

static struct list_head *user_event_get_fields(struct trace_event_call *call)
{
        struct user_event *user = (struct user_event *)call->data;

        return &user->fields;
}

/*
 * Parses a register command for user_events
 * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
 *
 * Example: an event named 'test' with a 20 char 'msg' field followed by an
 * unsigned int 'id' field:
 * test char[20] msg;unsigned int id
 *
 * NOTE: Offsets are from the user data perspective, they are not from the
 * trace_entry/buffer perspective. We automatically add the common properties
 * sizes to the offset for the user.
 *
 * Upon success user_event has its ref count increased by 1.
 */
static int user_event_parse_cmd(struct user_event_group *group,
                                char *raw_command, struct user_event **newuser,
                                int reg_flags)
{
        char *name = raw_command;
        char *args = strpbrk(name, " ");
        char *flags;

        if (args)
                *args++ = '\0';

        flags = strpbrk(name, ":");

        if (flags)
                *flags++ = '\0';

        return user_event_parse(group, name, args, flags, newuser, reg_flags);
}
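
/*
 * Editor's sketch (userspace, not part of this file): registering the
 * example command above through the user_events ABI, assuming the uapi
 * <linux/user_events.h> header (struct user_reg, DIAG_IOCSREG) and a
 * tracefs mount at /sys/kernel/tracing. The 'enabled' word and bit 0
 * are illustrative choices.
 */
#if 0 /* example only */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/user_events.h>

static int enabled;     /* kernel sets/clears bit 0 as tracers attach/detach */

int sketch_register(void)
{
        struct user_reg reg;
        int fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);

        if (fd < 0)
                return -1;

        memset(&reg, 0, sizeof(reg));
        reg.size = sizeof(reg);
        reg.enable_bit = 0;
        reg.enable_size = sizeof(enabled);
        reg.enable_addr = (__u64)(unsigned long)&enabled;
        reg.name_args = (__u64)(unsigned long)"test char[20] msg;unsigned int id";

        if (ioctl(fd, DIAG_IOCSREG, &reg) < 0)
                return -1;

        /*
         * reg.write_index now identifies 'test'; writes to fd start with
         * that index, followed by the payload the fields describe.
         */
        return fd;
}
#endif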

static int user_field_array_size(const char *type)
{
        const char *start = strchr(type, '[');
        char val[8];
        char *bracket;
        int size = 0;

        if (start == NULL)
                return -EINVAL;

        if (strscpy(val, start + 1, sizeof(val)) <= 0)
                return -EINVAL;

        bracket = strchr(val, ']');

        if (!bracket)
                return -EINVAL;

        *bracket = '\0';

        if (kstrtouint(val, 0, &size))
                return -EINVAL;

        if (size > MAX_FIELD_ARRAY_SIZE)
                return -EINVAL;

        return size;
}

static int user_field_size(const char *type)
{
        /* long is not allowed from a user, since it's ambiguous in size */
        if (strcmp(type, "s64") == 0)
                return sizeof(s64);
        if (strcmp(type, "u64") == 0)
                return sizeof(u64);
        if (strcmp(type, "s32") == 0)
                return sizeof(s32);
        if (strcmp(type, "u32") == 0)
                return sizeof(u32);
        if (strcmp(type, "int") == 0)
                return sizeof(int);
        if (strcmp(type, "unsigned int") == 0)
                return sizeof(unsigned int);
        if (strcmp(type, "s16") == 0)
                return sizeof(s16);
        if (strcmp(type, "u16") == 0)
                return sizeof(u16);
        if (strcmp(type, "short") == 0)
                return sizeof(short);
        if (strcmp(type, "unsigned short") == 0)
                return sizeof(unsigned short);
        if (strcmp(type, "s8") == 0)
                return sizeof(s8);
        if (strcmp(type, "u8") == 0)
                return sizeof(u8);
        if (strcmp(type, "char") == 0)
                return sizeof(char);
        if (strcmp(type, "unsigned char") == 0)
                return sizeof(unsigned char);
        if (str_has_prefix(type, "char["))
                return user_field_array_size(type);
        if (str_has_prefix(type, "unsigned char["))
                return user_field_array_size(type);
        if (str_has_prefix(type, "__data_loc "))
                return sizeof(u32);
        if (str_has_prefix(type, "__rel_loc "))
                return sizeof(u32);

        /* Unknown basic type, error */
        return -EINVAL;
}
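
/*
 * Editor's sketch of the mapping above: "u32" -> 4, "char" -> 1,
 * "char[20]" -> 20 (via user_field_array_size()), "__data_loc char[]"
 * -> 4 (only the u32 loc word lives in the payload). "long" is rejected
 * on purpose because its width differs between 32-bit and 64-bit ABIs.
 */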

static void user_event_destroy_validators(struct user_event *user)
{
        struct user_event_validator *validator, *next;
        struct list_head *head = &user->validators;

        list_for_each_entry_safe(validator, next, head, user_event_link) {
                list_del(&validator->user_event_link);
                kfree(validator);
        }
}

static void user_event_destroy_fields(struct user_event *user)
{
        struct ftrace_event_field *field, *next;
        struct list_head *head = &user->fields;

        list_for_each_entry_safe(field, next, head, link) {
                list_del(&field->link);
                kfree(field);
        }
}

static int user_event_add_field(struct user_event *user, const char *type,
                                const char *name, int offset, int size,
                                int is_signed, int filter_type)
{
        struct user_event_validator *validator;
        struct ftrace_event_field *field;
        int validator_flags = 0;

        field = kmalloc(sizeof(*field), GFP_KERNEL_ACCOUNT);

        if (!field)
                return -ENOMEM;

        if (str_has_prefix(type, "__data_loc "))
                goto add_validator;

        if (str_has_prefix(type, "__rel_loc ")) {
                validator_flags |= VALIDATOR_REL;
                goto add_validator;
        }

        goto add_field;

add_validator:
        if (strstr(type, "char") != NULL)
                validator_flags |= VALIDATOR_ENSURE_NULL;

        validator = kmalloc(sizeof(*validator), GFP_KERNEL_ACCOUNT);

        if (!validator) {
                kfree(field);
                return -ENOMEM;
        }

        validator->flags = validator_flags;
        validator->offset = offset;

        /* Want sequential access when validating */
        list_add_tail(&validator->user_event_link, &user->validators);

add_field:
        field->type = type;
        field->name = name;
        field->offset = offset;
        field->size = size;
        field->is_signed = is_signed;
        field->filter_type = filter_type;

        if (filter_type == FILTER_OTHER)
                field->filter_type = filter_assign_type(type);

        list_add(&field->link, &user->fields);

        /*
         * Min size from user writes that are required; this does not include
         * the size of trace_entry (common fields).
         */
        user->min_size = (offset + size) - sizeof(struct trace_entry);

        return 0;
}

/*
 * Parses the values of a field within the description
 * Format: type name [size]
 */
static int user_event_parse_field(char *field, struct user_event *user,
                                  u32 *offset)
{
        char *part, *type, *name;
        u32 depth = 0, saved_offset = *offset;
        int len, size = -EINVAL;
        bool is_struct = false;

        field = skip_spaces(field);

        if (*field == '\0')
                return 0;

        /* Handle types that have a space within */
        len = str_has_prefix(field, "unsigned ");
        if (len)
                goto skip_next;

        len = str_has_prefix(field, "struct ");
        if (len) {
                is_struct = true;
                goto skip_next;
        }

        len = str_has_prefix(field, "__data_loc unsigned ");
        if (len)
                goto skip_next;

        len = str_has_prefix(field, "__data_loc ");
        if (len)
                goto skip_next;

        len = str_has_prefix(field, "__rel_loc unsigned ");
        if (len)
                goto skip_next;

        len = str_has_prefix(field, "__rel_loc ");
        if (len)
                goto skip_next;

        goto parse;
skip_next:
        type = field;
        field = strpbrk(field + len, " ");

        if (field == NULL)
                return -EINVAL;

        *field++ = '\0';
        depth++;
parse:
        name = NULL;

        while ((part = strsep(&field, " ")) != NULL) {
                switch (depth++) {
                case FIELD_DEPTH_TYPE:
                        type = part;
                        break;
                case FIELD_DEPTH_NAME:
                        name = part;
                        break;
                case FIELD_DEPTH_SIZE:
                        if (!is_struct)
                                return -EINVAL;

                        if (kstrtou32(part, 10, &size))
                                return -EINVAL;
                        break;
                default:
                        return -EINVAL;
                }
        }

        if (depth < FIELD_DEPTH_SIZE || !name)
                return -EINVAL;

        if (depth == FIELD_DEPTH_SIZE)
                size = user_field_size(type);

        if (size == 0)
                return -EINVAL;

        if (size < 0)
                return size;

        *offset = saved_offset + size;

        return user_event_add_field(user, type, name, saved_offset, size,
                                    type[0] != 'u', FILTER_OTHER);
}

static int user_event_parse_fields(struct user_event *user, char *args)
{
        char *field;
        u32 offset = sizeof(struct trace_entry);
        int ret = -EINVAL;

        if (args == NULL)
                return 0;

        while ((field = strsep(&args, ";")) != NULL) {
                ret = user_event_parse_field(field, user, &offset);

                if (ret)
                        break;
        }

        return ret;
}
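
/*
 * Editor's sketch of a full parse: for "char[20] msg;unsigned int id",
 * 'msg' lands at offset sizeof(struct trace_entry) with size 20, 'id'
 * at that offset + 20 with size 4, and min_size becomes 24, the payload
 * each write must supply. A struct needs an explicit trailing size
 * ("struct mybuf payload 32") since the kernel cannot infer it.
 */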

static struct trace_event_fields user_event_fields_array[1];

static const char *user_field_format(const char *type)
{
        if (strcmp(type, "s64") == 0)
                return "%lld";
        if (strcmp(type, "u64") == 0)
                return "%llu";
        if (strcmp(type, "s32") == 0)
                return "%d";
        if (strcmp(type, "u32") == 0)
                return "%u";
        if (strcmp(type, "int") == 0)
                return "%d";
        if (strcmp(type, "unsigned int") == 0)
                return "%u";
        if (strcmp(type, "s16") == 0)
                return "%d";
        if (strcmp(type, "u16") == 0)
                return "%u";
        if (strcmp(type, "short") == 0)
                return "%d";
        if (strcmp(type, "unsigned short") == 0)
                return "%u";
        if (strcmp(type, "s8") == 0)
                return "%d";
        if (strcmp(type, "u8") == 0)
                return "%u";
        if (strcmp(type, "char") == 0)
                return "%d";
        if (strcmp(type, "unsigned char") == 0)
                return "%u";
        if (strstr(type, "char[") != NULL)
                return "%s";

        /* Unknown type, likely a struct; allowed, treat as 64-bit */
        return "%llu";
}

static bool user_field_is_dyn_string(const char *type, const char **str_func)
{
        if (str_has_prefix(type, "__data_loc ")) {
                *str_func = "__get_str";
                goto check;
        }

        if (str_has_prefix(type, "__rel_loc ")) {
                *str_func = "__get_rel_str";
                goto check;
        }

        return false;
check:
        return strstr(type, "char") != NULL;
}

#define LEN_OR_ZERO (len ? len - pos : 0)
static int user_dyn_field_set_string(int argc, const char **argv, int *iout,
                                     char *buf, int len, bool *colon)
{
        int pos = 0, i = *iout;

        *colon = false;

        for (; i < argc; ++i) {
                if (i != *iout)
                        pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

                pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", argv[i]);

                if (strchr(argv[i], ';')) {
                        ++i;
                        *colon = true;
                        break;
                }
        }

        /* Actual set, advance i */
        if (len != 0)
                *iout = i;

        return pos + 1;
}

static int user_field_set_string(struct ftrace_event_field *field,
                                 char *buf, int len, bool colon)
{
        int pos = 0;

        pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type);
        pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
        pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);

        if (str_has_prefix(field->type, "struct "))
                pos += snprintf(buf + pos, LEN_OR_ZERO, " %d", field->size);

        if (colon)
                pos += snprintf(buf + pos, LEN_OR_ZERO, ";");

        return pos + 1;
}

static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
{
        struct ftrace_event_field *field;
        struct list_head *head = &user->fields;
        int pos = 0, depth = 0;
        const char *str_func;

        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

        list_for_each_entry_reverse(field, head, link) {
                if (depth != 0)
                        pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

                pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
                                field->name, user_field_format(field->type));

                depth++;
        }

        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

        list_for_each_entry_reverse(field, head, link) {
                if (user_field_is_dyn_string(field->type, &str_func))
                        pos += snprintf(buf + pos, LEN_OR_ZERO,
                                        ", %s(%s)", str_func, field->name);
                else
                        pos += snprintf(buf + pos, LEN_OR_ZERO,
                                        ", REC->%s", field->name);
        }

        return pos + 1;
}
#undef LEN_OR_ZERO
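
/*
 * Editor's sketch: for the "test" event used as the example earlier, the
 * generated print_fmt would be "\"msg=%s id=%u\", REC->msg, REC->id".
 * Fields are walked in reverse because user_event_add_field() prepends
 * to user->fields, which restores the user's declaration order here.
 */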

static int user_event_create_print_fmt(struct user_event *user)
{
        char *print_fmt;
        int len;

        len = user_event_set_print_fmt(user, NULL, 0);

        print_fmt = kmalloc(len, GFP_KERNEL_ACCOUNT);

        if (!print_fmt)
                return -ENOMEM;

        user_event_set_print_fmt(user, print_fmt, len);

        user->call.print_fmt = print_fmt;

        return 0;
}

static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
                                                int flags,
                                                struct trace_event *event)
{
        return print_event_fields(iter, event);
}

static struct trace_event_functions user_event_funcs = {
        .trace = user_event_print_trace,
};

static int user_event_set_call_visible(struct user_event *user, bool visible)
{
        int ret;
        const struct cred *old_cred;
        struct cred *cred;

        cred = prepare_creds();

        if (!cred)
                return -ENOMEM;

        /*
         * While by default tracefs is locked down, systems can be configured
         * to allow user_event files to be less locked down. The extreme case
         * being "other" has read/write access to user_events_data/status.
         *
         * When not locked down, processes may not have permissions to
         * add/remove calls themselves to tracefs. We need to temporarily
         * switch to root file permission to allow for this scenario.
         */
        cred->fsuid = GLOBAL_ROOT_UID;

        old_cred = override_creds(cred);

        if (visible)
                ret = trace_add_event_call(&user->call);
        else
                ret = trace_remove_event_call(&user->call);

        revert_creds(old_cred);
        put_cred(cred);

        return ret;
}

static int destroy_user_event(struct user_event *user)
{
        int ret = 0;

        lockdep_assert_held(&event_mutex);

        /* Must destroy fields before call removal */
        user_event_destroy_fields(user);

        ret = user_event_set_call_visible(user, false);

        if (ret)
                return ret;

        dyn_event_remove(&user->devent);
        hash_del(&user->node);

        user_event_destroy_validators(user);

        /* If we have different names, both must be freed */
        if (EVENT_NAME(user) != EVENT_TP_NAME(user))
                kfree(EVENT_TP_NAME(user));

        kfree(user->call.print_fmt);
        kfree(EVENT_NAME(user));
        kfree(user);

        if (current_user_events > 0)
                current_user_events--;
        else
                pr_alert("BUG: Bad current_user_events\n");

        return ret;
}

static struct user_event *find_user_event(struct user_event_group *group,
                                          char *name, int argc, const char **argv,
                                          u32 flags, u32 *outkey)
{
        struct user_event *user;
        u32 key = user_event_key(name);

        *outkey = key;

        hash_for_each_possible(group->register_table, user, node, key) {
                /*
                 * Single-format events shouldn't return multi-format
                 * events. Callers expect the underlying tracepoint to match
                 * the name exactly in these cases. Only check like-formats.
                 */
                if (EVENT_MULTI_FORMAT(flags) != EVENT_MULTI_FORMAT(user->reg_flags))
                        continue;

                if (strcmp(EVENT_NAME(user), name))
                        continue;

                if (user_fields_match(user, argc, argv))
                        return user_event_get(user);

                /* Scan others if this is a multi-format event */
                if (EVENT_MULTI_FORMAT(flags))
                        continue;

                return ERR_PTR(-EADDRINUSE);
        }

        return NULL;
}
1553 
1554 static int user_event_validate(struct user_event *user, void *data, int len)
1555 {
1556         struct list_head *head = &user->validators;
1557         struct user_event_validator *validator;
1558         void *pos, *end = data + len;
1559         u32 loc, offset, size;
1560 
1561         list_for_each_entry(validator, head, user_event_link) {
1562                 pos = data + validator->offset;
1563 
1564                 /* Already done min_size check, no bounds check here */
1565                 loc = *(u32 *)pos;
1566                 offset = loc & 0xffff;
1567                 size = loc >> 16;
1568 
1569                 if (likely(validator->flags & VALIDATOR_REL))
1570                         pos += offset + sizeof(loc);
1571                 else
1572                         pos = data + offset;
1573 
1574                 pos += size;
1575 
1576                 if (unlikely(pos > end))
1577                         return -EFAULT;
1578 
1579                 if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
1580                         if (unlikely(*(char *)(pos - 1) != '\0'))
1581                                 return -EFAULT;
1582         }
1583 
1584         return 0;
1585 }
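/*
 * Illustrative sketch (hypothetical user-space writer, not kernel code):
 * each validator covers one dynamically-sized field whose 32-bit location
 * word packs the payload offset in the low 16 bits and the length in the
 * high 16 bits, exactly as decoded above. Encoding a string field might
 * look roughly like:
 *
 *	const char *str = "hello";
 *	__u16 len = strlen(str) + 1;		// include the NUL terminator
 *	__u16 off = fixed_part_size;		// hypothetical payload offset
 *	__u32 loc = ((__u32)len << 16) | off;	// word this function decodes
 *
 * For VALIDATOR_REL fields the offset is relative to just after the
 * location word itself; otherwise it is relative to the start of the
 * record. VALIDATOR_ENSURE_NULL then checks that the final byte is '\0'.
 */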
1586 
1587 /*
1588  * Writes the user-supplied payload out to a trace file.
1589  */
1590 static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
1591                               void *tpdata, bool *faulted)
1592 {
1593         struct trace_event_file *file;
1594         struct trace_entry *entry;
1595         struct trace_event_buffer event_buffer;
1596         size_t size = sizeof(*entry) + i->count;
1597 
1598         file = (struct trace_event_file *)tpdata;
1599 
1600         if (!file ||
1601             !(file->flags & EVENT_FILE_FL_ENABLED) ||
1602             trace_trigger_soft_disabled(file))
1603                 return;
1604 
1605         /* Allocates and fills trace_entry; entry + 1 is the start of the data payload */
1606         entry = trace_event_buffer_reserve(&event_buffer, file, size);
1607 
1608         if (unlikely(!entry))
1609                 return;
1610 
1611         if (unlikely(i->count != 0 && !copy_nofault(entry + 1, i->count, i)))
1612                 goto discard;
1613 
1614         if (!list_empty(&user->validators) &&
1615             unlikely(user_event_validate(user, entry, size)))
1616                 goto discard;
1617 
1618         trace_event_buffer_commit(&event_buffer);
1619 
1620         return;
1621 discard:
1622         *faulted = true;
1623         __trace_event_discard_commit(event_buffer.buffer,
1624                                      event_buffer.event);
1625 }
1626 
1627 #ifdef CONFIG_PERF_EVENTS
1628 /*
1629  * Writes the user-supplied payload out to the perf ring buffer.
1630  */
1631 static void user_event_perf(struct user_event *user, struct iov_iter *i,
1632                             void *tpdata, bool *faulted)
1633 {
1634         struct hlist_head *perf_head;
1635 
1636         perf_head = this_cpu_ptr(user->call.perf_events);
1637 
1638         if (perf_head && !hlist_empty(perf_head)) {
1639                 struct trace_entry *perf_entry;
1640                 struct pt_regs *regs;
1641                 size_t size = sizeof(*perf_entry) + i->count;
1642                 int context;
1643 
1644                 perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
1645                                                   &regs, &context);
1646 
1647                 if (unlikely(!perf_entry))
1648                         return;
1649 
1650                 perf_fetch_caller_regs(regs);
1651 
1652                 if (unlikely(i->count != 0 && !copy_nofault(perf_entry + 1, i->count, i)))
1653                         goto discard;
1654 
1655                 if (!list_empty(&user->validators) &&
1656                     unlikely(user_event_validate(user, perf_entry, size)))
1657                         goto discard;
1658 
1659                 perf_trace_buf_submit(perf_entry, size, context,
1660                                       user->call.event.type, 1, regs,
1661                                       perf_head, NULL);
1662 
1663                 return;
1664 discard:
1665                 *faulted = true;
1666                 perf_swevent_put_recursion_context(context);
1667         }
1668 }
1669 #endif
1670 
1671 /*
1672  * Update the enabled bit among all user processes.
1673  */
1674 static void update_enable_bit_for(struct user_event *user)
1675 {
1676         struct tracepoint *tp = &user->tracepoint;
1677         char status = 0;
1678 
1679         if (atomic_read(&tp->key.enabled) > 0) {
1680                 struct tracepoint_func *probe_func_ptr;
1681                 user_event_func_t probe_func;
1682 
1683                 rcu_read_lock_sched();
1684 
1685                 probe_func_ptr = rcu_dereference_sched(tp->funcs);
1686 
1687                 if (probe_func_ptr) {
1688                         do {
1689                                 probe_func = probe_func_ptr->func;
1690 
1691                                 if (probe_func == user_event_ftrace)
1692                                         status |= EVENT_STATUS_FTRACE;
1693 #ifdef CONFIG_PERF_EVENTS
1694                                 else if (probe_func == user_event_perf)
1695                                         status |= EVENT_STATUS_PERF;
1696 #endif
1697                                 else
1698                                         status |= EVENT_STATUS_OTHER;
1699                         } while ((++probe_func_ptr)->func);
1700                 }
1701 
1702                 rcu_read_unlock_sched();
1703         }
1704 
1705         user->status = status;
1706 
1707         user_event_enabler_update(user);
1708 }
1709 
1710 /*
1711  * Registration callback for our events, invoked by the tracing sub-systems.
1712  */
1713 static int user_event_reg(struct trace_event_call *call,
1714                           enum trace_reg type,
1715                           void *data)
1716 {
1717         struct user_event *user = (struct user_event *)call->data;
1718         int ret = 0;
1719 
1720         if (!user)
1721                 return -ENOENT;
1722 
1723         switch (type) {
1724         case TRACE_REG_REGISTER:
1725                 ret = tracepoint_probe_register(call->tp,
1726                                                 call->class->probe,
1727                                                 data);
1728                 if (!ret)
1729                         goto inc;
1730                 break;
1731 
1732         case TRACE_REG_UNREGISTER:
1733                 tracepoint_probe_unregister(call->tp,
1734                                             call->class->probe,
1735                                             data);
1736                 goto dec;
1737 
1738 #ifdef CONFIG_PERF_EVENTS
1739         case TRACE_REG_PERF_REGISTER:
1740                 ret = tracepoint_probe_register(call->tp,
1741                                                 call->class->perf_probe,
1742                                                 data);
1743                 if (!ret)
1744                         goto inc;
1745                 break;
1746 
1747         case TRACE_REG_PERF_UNREGISTER:
1748                 tracepoint_probe_unregister(call->tp,
1749                                             call->class->perf_probe,
1750                                             data);
1751                 goto dec;
1752 
1753         case TRACE_REG_PERF_OPEN:
1754         case TRACE_REG_PERF_CLOSE:
1755         case TRACE_REG_PERF_ADD:
1756         case TRACE_REG_PERF_DEL:
1757                 break;
1758 #endif
1759         }
1760 
1761         return ret;
1762 inc:
1763         user_event_get(user);
1764         update_enable_bit_for(user);
1765         return 0;
1766 dec:
1767         update_enable_bit_for(user);
1768         user_event_put(user, true);
1769         return 0;
1770 }
1771 
1772 static int user_event_create(const char *raw_command)
1773 {
1774         struct user_event_group *group;
1775         struct user_event *user;
1776         char *name;
1777         int ret;
1778 
1779         if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
1780                 return -ECANCELED;
1781 
1782         raw_command += USER_EVENTS_PREFIX_LEN;
1783         raw_command = skip_spaces(raw_command);
1784 
1785         name = kstrdup(raw_command, GFP_KERNEL_ACCOUNT);
1786 
1787         if (!name)
1788                 return -ENOMEM;
1789 
1790         group = current_user_event_group();
1791 
1792         if (!group) {
1793                 kfree(name);
1794                 return -ENOENT;
1795         }
1796 
1797         mutex_lock(&group->reg_mutex);
1798 
1799         /* Dyn events persist, otherwise they would be cleaned up immediately */
1800         ret = user_event_parse_cmd(group, name, &user, USER_EVENT_REG_PERSIST);
1801 
1802         if (!ret)
1803                 user_event_put(user, false);
1804 
1805         mutex_unlock(&group->reg_mutex);
1806 
1807         if (ret)
1808                 kfree(name);
1809 
1810         return ret;
1811 }
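/*
 * Illustrative usage (not kernel code): user_event_create() services the
 * dynamic_events interface. Assuming tracefs is mounted at
 * /sys/kernel/tracing and the documented "u:" prefix, a hypothetical
 * event named "mytest" could be created from a shell with:
 *
 *	echo 'u:mytest u32 count' >> /sys/kernel/tracing/dynamic_events
 *
 * The prefix routes the command here; the remainder is handed to
 * user_event_parse_cmd() with USER_EVENT_REG_PERSIST so the event
 * outlives the writer.
 */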
1812 
1813 static int user_event_show(struct seq_file *m, struct dyn_event *ev)
1814 {
1815         struct user_event *user = container_of(ev, struct user_event, devent);
1816         struct ftrace_event_field *field;
1817         struct list_head *head;
1818         int depth = 0;
1819 
1820         seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));
1821 
1822         head = trace_get_fields(&user->call);
1823 
1824         list_for_each_entry_reverse(field, head, link) {
1825                 if (depth == 0)
1826                         seq_puts(m, " ");
1827                 else
1828                         seq_puts(m, "; ");
1829 
1830                 seq_printf(m, "%s %s", field->type, field->name);
1831 
1832                 if (str_has_prefix(field->type, "struct "))
1833                         seq_printf(m, " %d", field->size);
1834 
1835                 depth++;
1836         }
1837 
1838         seq_puts(m, "\n");
1839 
1840         return 0;
1841 }
1842 
1843 static bool user_event_is_busy(struct dyn_event *ev)
1844 {
1845         struct user_event *user = container_of(ev, struct user_event, devent);
1846 
1847         return !user_event_last_ref(user);
1848 }
1849 
1850 static int user_event_free(struct dyn_event *ev)
1851 {
1852         struct user_event *user = container_of(ev, struct user_event, devent);
1853 
1854         if (!user_event_last_ref(user))
1855                 return -EBUSY;
1856 
1857         if (!user_event_capable(user->reg_flags))
1858                 return -EPERM;
1859 
1860         return destroy_user_event(user);
1861 }
1862 
1863 static bool user_field_match(struct ftrace_event_field *field, int argc,
1864                              const char **argv, int *iout)
1865 {
1866         char *field_name = NULL, *dyn_field_name = NULL;
1867         bool colon = false, match = false;
1868         int dyn_len, len;
1869 
1870         if (*iout >= argc)
1871                 return false;
1872 
1873         dyn_len = user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
1874                                             0, &colon);
1875 
1876         len = user_field_set_string(field, field_name, 0, colon);
1877 
1878         if (dyn_len != len)
1879                 return false;
1880 
1881         dyn_field_name = kmalloc(dyn_len, GFP_KERNEL);
1882         field_name = kmalloc(len, GFP_KERNEL);
1883 
1884         if (!dyn_field_name || !field_name)
1885                 goto out;
1886 
1887         user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
1888                                   dyn_len, &colon);
1889 
1890         user_field_set_string(field, field_name, len, colon);
1891 
1892         match = strcmp(dyn_field_name, field_name) == 0;
1893 out:
1894         kfree(dyn_field_name);
1895         kfree(field_name);
1896 
1897         return match;
1898 }
1899 
1900 static bool user_fields_match(struct user_event *user, int argc,
1901                               const char **argv)
1902 {
1903         struct ftrace_event_field *field;
1904         struct list_head *head = &user->fields;
1905         int i = 0;
1906 
1907         if (argc == 0)
1908                 return list_empty(head);
1909 
1910         list_for_each_entry_reverse(field, head, link) {
1911                 if (!user_field_match(field, argc, argv, &i))
1912                         return false;
1913         }
1914 
1915         if (i != argc)
1916                 return false;
1917 
1918         return true;
1919 }
1920 
1921 static bool user_event_match(const char *system, const char *event,
1922                              int argc, const char **argv, struct dyn_event *ev)
1923 {
1924         struct user_event *user = container_of(ev, struct user_event, devent);
1925         bool match;
1926 
1927         match = strcmp(EVENT_NAME(user), event) == 0;
1928 
1929         if (match && system) {
1930                 match = strcmp(system, user->group->system_name) == 0 ||
1931                         strcmp(system, user->group->system_multi_name) == 0;
1932         }
1933 
1934         if (match)
1935                 match = user_fields_match(user, argc, argv);
1936 
1937         return match;
1938 }
1939 
1940 static struct dyn_event_operations user_event_dops = {
1941         .create = user_event_create,
1942         .show = user_event_show,
1943         .is_busy = user_event_is_busy,
1944         .free = user_event_free,
1945         .match = user_event_match,
1946 };
1947 
1948 static int user_event_trace_register(struct user_event *user)
1949 {
1950         int ret;
1951 
1952         ret = register_trace_event(&user->call.event);
1953 
1954         if (!ret)
1955                 return -ENODEV;
1956 
1957         ret = user_event_set_call_visible(user, true);
1958 
1959         if (ret)
1960                 unregister_trace_event(&user->call.event);
1961 
1962         return ret;
1963 }
1964 
1965 static int user_event_set_tp_name(struct user_event *user)
1966 {
1967         lockdep_assert_held(&user->group->reg_mutex);
1968 
1969         if (EVENT_MULTI_FORMAT(user->reg_flags)) {
1970                 char *multi_name;
1971 
1972                 multi_name = kasprintf(GFP_KERNEL_ACCOUNT, "%s.%llx",
1973                                        user->reg_name, user->group->multi_id);
1974 
1975                 if (!multi_name)
1976                         return -ENOMEM;
1977 
1978                 user->call.name = multi_name;
1979                 user->tracepoint.name = multi_name;
1980 
1981                 /* Increment to ensure a unique multi-event name next time */
1982                 user->group->multi_id++;
1983         } else {
1984                 /* Non-multi-format events use the register name */
1985                 user->call.name = user->reg_name;
1986                 user->tracepoint.name = user->reg_name;
1987         }
1988 
1989         return 0;
1990 }
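/*
 * For illustration: with the multi-format flag set, repeated registrations
 * of a hypothetical name "mytest" produce unique tracepoint names such as
 * "mytest.0", "mytest.1", ... since multi_id is appended in hex and
 * incremented each time. Non-multi-format events keep the register name
 * as-is.
 */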
1991 
1992 /*
1993  * Counts how many ';' characters in args lack a trailing space.
1994  */
1995 static int count_semis_no_space(char *args)
1996 {
1997         int count = 0;
1998 
1999         while ((args = strchr(args, ';'))) {
2000                 args++;
2001 
2002                 if (!isspace(*args))
2003                         count++;
2004         }
2005 
2006         return count;
2007 }
2008 
2009 /*
2010  * Copies the arguments while ensuring all ';' have a trailing space.
2011  */
2012 static char *insert_space_after_semis(char *args, int count)
2013 {
2014         char *fixed, *pos;
2015         int len;
2016 
2017         len = strlen(args) + count;
2018         fixed = kmalloc(len + 1, GFP_KERNEL);
2019 
2020         if (!fixed)
2021                 return NULL;
2022 
2023         pos = fixed;
2024 
2025         /* Insert a space after ';' if there is no trailing space. */
2026         while (*args) {
2027                 *pos = *args++;
2028 
2029                 if (*pos++ == ';' && !isspace(*args))
2030                         *pos++ = ' ';
2031         }
2032 
2033         *pos = '\0';
2034 
2035         return fixed;
2036 }
2037 
2038 static char **user_event_argv_split(char *args, int *argc)
2039 {
2040         char **split;
2041         char *fixed;
2042         int count;
2043 
2044         /* Count how many ';' without a trailing space */
2045         count = count_semis_no_space(args);
2046 
2047         /* No fixup is required */
2048         if (!count)
2049                 return argv_split(GFP_KERNEL, args, argc);
2050 
2051         /* We must fix up 'field;field' to 'field; field' */
2052         fixed = insert_space_after_semis(args, count);
2053 
2054         if (!fixed)
2055                 return NULL;
2056 
2057         /* We do a normal split afterwards */
2058         split = argv_split(GFP_KERNEL, fixed, argc);
2059 
2060         /* We can free since argv_split makes a copy */
2061         kfree(fixed);
2062 
2063         return split;
2064 }
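/*
 * Illustrative walk-through: given args of "u32 a;u32 b",
 * count_semis_no_space() returns 1, insert_space_after_semis() produces
 * "u32 a; u32 b", and argv_split() then yields argc == 4 with
 * { "u32", "a;", "u32", "b" }.
 */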
2065 
2066 /*
2067  * Parses the event name, arguments, and flags, then registers the event if
2068  * successful. The name buffer's lifetime is owned by this method for success
2069  * cases only. Upon success the returned user_event has its ref count increased by 1.
2070  */
2071 static int user_event_parse(struct user_event_group *group, char *name,
2072                             char *args, char *flags,
2073                             struct user_event **newuser, int reg_flags)
2074 {
2075         struct user_event *user;
2076         char **argv = NULL;
2077         int argc = 0;
2078         int ret;
2079         u32 key;
2080 
2081         /* Currently we don't support any text-based flags */
2082         if (flags != NULL)
2083                 return -EINVAL;
2084 
2085         if (!user_event_capable(reg_flags))
2086                 return -EPERM;
2087 
2088         if (args) {
2089                 argv = user_event_argv_split(args, &argc);
2090 
2091                 if (!argv)
2092                         return -ENOMEM;
2093         }
2094 
2095         /* Prevent dyn_event from racing */
2096         mutex_lock(&event_mutex);
2097         user = find_user_event(group, name, argc, (const char **)argv,
2098                                reg_flags, &key);
2099         mutex_unlock(&event_mutex);
2100 
2101         if (argv)
2102                 argv_free(argv);
2103 
2104         if (IS_ERR(user))
2105                 return PTR_ERR(user);
2106 
2107         if (user) {
2108                 *newuser = user;
2109                 /*
2110                  * The name is allocated by the caller; free it here since the
2111                  * event already exists. The caller only frees the name on failure.
2112                  */
2113                 kfree(name);
2114 
2115                 return 0;
2116         }
2117 
2118         user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT);
2119 
2120         if (!user)
2121                 return -ENOMEM;
2122 
2123         INIT_LIST_HEAD(&user->class.fields);
2124         INIT_LIST_HEAD(&user->fields);
2125         INIT_LIST_HEAD(&user->validators);
2126 
2127         user->group = group;
2128         user->reg_name = name;
2129         user->reg_flags = reg_flags;
2130 
2131         ret = user_event_set_tp_name(user);
2132 
2133         if (ret)
2134                 goto put_user;
2135 
2136         ret = user_event_parse_fields(user, args);
2137 
2138         if (ret)
2139                 goto put_user;
2140 
2141         ret = user_event_create_print_fmt(user);
2142 
2143         if (ret)
2144                 goto put_user;
2145 
2146         user->call.data = user;
2147         user->call.class = &user->class;
2148         user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
2149         user->call.tp = &user->tracepoint;
2150         user->call.event.funcs = &user_event_funcs;
2151 
2152         if (EVENT_MULTI_FORMAT(user->reg_flags))
2153                 user->class.system = group->system_multi_name;
2154         else
2155                 user->class.system = group->system_name;
2156 
2157         user->class.fields_array = user_event_fields_array;
2158         user->class.get_fields = user_event_get_fields;
2159         user->class.reg = user_event_reg;
2160         user->class.probe = user_event_ftrace;
2161 #ifdef CONFIG_PERF_EVENTS
2162         user->class.perf_probe = user_event_perf;
2163 #endif
2164 
2165         mutex_lock(&event_mutex);
2166 
2167         if (current_user_events >= max_user_events) {
2168                 ret = -EMFILE;
2169                 goto put_user_lock;
2170         }
2171 
2172         ret = user_event_trace_register(user);
2173 
2174         if (ret)
2175                 goto put_user_lock;
2176 
2177         if (user->reg_flags & USER_EVENT_REG_PERSIST) {
2178                 /* Ensure we track self ref and caller ref (2) */
2179                 refcount_set(&user->refcnt, 2);
2180         } else {
2181                 /* Ensure we track only caller ref (1) */
2182                 refcount_set(&user->refcnt, 1);
2183         }
2184 
2185         dyn_event_init(&user->devent, &user_event_dops);
2186         dyn_event_add(&user->devent, &user->call);
2187         hash_add(group->register_table, &user->node, key);
2188         current_user_events++;
2189 
2190         mutex_unlock(&event_mutex);
2191 
2192         *newuser = user;
2193         return 0;
2194 put_user_lock:
2195         mutex_unlock(&event_mutex);
2196 put_user:
2197         user_event_destroy_fields(user);
2198         user_event_destroy_validators(user);
2199         kfree(user->call.print_fmt);
2200 
2201         /* Caller frees reg_name on error, but not multi-name */
2202         if (EVENT_NAME(user) != EVENT_TP_NAME(user))
2203                 kfree(EVENT_TP_NAME(user));
2204 
2205         kfree(user);
2206         return ret;
2207 }
2208 
2209 /*
2210  * Deletes previously created events if they are no longer being used.
2211  */
2212 static int delete_user_event(struct user_event_group *group, char *name)
2213 {
2214         struct user_event *user;
2215         struct hlist_node *tmp;
2216         u32 key = user_event_key(name);
2217         int ret = -ENOENT;
2218 
2219         /* Attempt to delete all event(s) with the name passed in */
2220         hash_for_each_possible_safe(group->register_table, user, tmp, node, key) {
2221                 if (strcmp(EVENT_NAME(user), name))
2222                         continue;
2223 
2224                 if (!user_event_last_ref(user))
2225                         return -EBUSY;
2226 
2227                 if (!user_event_capable(user->reg_flags))
2228                         return -EPERM;
2229 
2230                 ret = destroy_user_event(user);
2231 
2232                 if (ret)
2233                         goto out;
2234         }
2235 out:
2236         return ret;
2237 }
2238 
2239 /*
2240  * Validates the user payload and writes via iterator.
2241  */
2242 static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
2243 {
2244         struct user_event_file_info *info = file->private_data;
2245         struct user_event_refs *refs;
2246         struct user_event *user = NULL;
2247         struct tracepoint *tp;
2248         ssize_t ret = i->count;
2249         int idx;
2250 
2251         if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
2252                 return -EFAULT;
2253 
2254         if (idx < 0)
2255                 return -EINVAL;
2256 
2257         rcu_read_lock_sched();
2258 
2259         refs = rcu_dereference_sched(info->refs);
2260 
2261         /*
2262          * The refs->events array is protected by RCU, and new items may be
2263          * added. But an entry retrieved by indexing into the events array
2264          * is immutable while the file is open.
2265          */
2266         if (likely(refs && idx < refs->count))
2267                 user = refs->events[idx];
2268 
2269         rcu_read_unlock_sched();
2270 
2271         if (unlikely(user == NULL))
2272                 return -ENOENT;
2273 
2274         if (unlikely(i->count < user->min_size))
2275                 return -EINVAL;
2276 
2277         tp = &user->tracepoint;
2278 
2279         /*
2280          * It's possible key.enabled gets disabled after this check; however,
2281          * we don't mind if a few events slip through in that window.
2282          */
2283         if (likely(atomic_read(&tp->key.enabled) > 0)) {
2284                 struct tracepoint_func *probe_func_ptr;
2285                 user_event_func_t probe_func;
2286                 struct iov_iter copy;
2287                 void *tpdata;
2288                 bool faulted;
2289 
2290                 if (unlikely(fault_in_iov_iter_readable(i, i->count)))
2291                         return -EFAULT;
2292 
2293                 faulted = false;
2294 
2295                 rcu_read_lock_sched();
2296 
2297                 probe_func_ptr = rcu_dereference_sched(tp->funcs);
2298 
2299                 if (probe_func_ptr) {
2300                         do {
2301                                 copy = *i;
2302                                 probe_func = probe_func_ptr->func;
2303                                 tpdata = probe_func_ptr->data;
2304                                 probe_func(user, &copy, tpdata, &faulted);
2305                         } while ((++probe_func_ptr)->func);
2306                 }
2307 
2308                 rcu_read_unlock_sched();
2309 
2310                 if (unlikely(faulted))
2311                         return -EFAULT;
2312         } else
2313                 return -EBADF;
2314 
2315         return ret;
2316 }
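/*
 * Illustrative user-space sketch (names hypothetical): payloads written
 * to user_events_data must begin with the 32-bit write_index returned by
 * DIAG_IOCSREG, which is what the copy_from_iter() of idx above consumes.
 * Assuming "fd" is the open file and "idx" holds that index:
 *
 *	struct iovec io[2];
 *	__u32 count = 1;			// example event payload
 *
 *	io[0].iov_base = &idx;
 *	io[0].iov_len = sizeof(idx);
 *	io[1].iov_base = &count;
 *	io[1].iov_len = sizeof(count);
 *
 *	writev(fd, io, 2);			// needs <sys/uio.h>
 */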
2317 
2318 static int user_events_open(struct inode *node, struct file *file)
2319 {
2320         struct user_event_group *group;
2321         struct user_event_file_info *info;
2322 
2323         group = current_user_event_group();
2324 
2325         if (!group)
2326                 return -ENOENT;
2327 
2328         info = kzalloc(sizeof(*info), GFP_KERNEL_ACCOUNT);
2329 
2330         if (!info)
2331                 return -ENOMEM;
2332 
2333         info->group = group;
2334 
2335         file->private_data = info;
2336 
2337         return 0;
2338 }
2339 
2340 static ssize_t user_events_write(struct file *file, const char __user *ubuf,
2341                                  size_t count, loff_t *ppos)
2342 {
2343         struct iov_iter i;
2344 
2345         if (unlikely(*ppos != 0))
2346                 return -EFAULT;
2347 
2348         if (unlikely(import_ubuf(ITER_SOURCE, (char __user *)ubuf, count, &i)))
2349                 return -EFAULT;
2350 
2351         return user_events_write_core(file, &i);
2352 }
2353 
2354 static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i)
2355 {
2356         return user_events_write_core(kp->ki_filp, i);
2357 }
2358 
2359 static int user_events_ref_add(struct user_event_file_info *info,
2360                                struct user_event *user)
2361 {
2362         struct user_event_group *group = info->group;
2363         struct user_event_refs *refs, *new_refs;
2364         int i, size, count = 0;
2365 
2366         refs = rcu_dereference_protected(info->refs,
2367                                          lockdep_is_held(&group->reg_mutex));
2368 
2369         if (refs) {
2370                 count = refs->count;
2371 
2372                 for (i = 0; i < count; ++i)
2373                         if (refs->events[i] == user)
2374                                 return i;
2375         }
2376 
2377         size = struct_size(refs, events, count + 1);
2378 
2379         new_refs = kzalloc(size, GFP_KERNEL_ACCOUNT);
2380 
2381         if (!new_refs)
2382                 return -ENOMEM;
2383 
2384         new_refs->count = count + 1;
2385 
2386         for (i = 0; i < count; ++i)
2387                 new_refs->events[i] = refs->events[i];
2388 
2389         new_refs->events[i] = user_event_get(user);
2390 
2391         rcu_assign_pointer(info->refs, new_refs);
2392 
2393         if (refs)
2394                 kfree_rcu(refs, rcu);
2395 
2396         return i;
2397 }
2398 
2399 static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg)
2400 {
2401         u32 size;
2402         long ret;
2403 
2404         ret = get_user(size, &ureg->size);
2405 
2406         if (ret)
2407                 return ret;
2408 
2409         if (size > PAGE_SIZE)
2410                 return -E2BIG;
2411 
2412         if (size < offsetofend(struct user_reg, write_index))
2413                 return -EINVAL;
2414 
2415         ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
2416 
2417         if (ret)
2418                 return ret;
2419 
2420         /* Ensure only valid flags */
2421         if (kreg->flags & ~(USER_EVENT_REG_MAX-1))
2422                 return -EINVAL;
2423 
2424         /* Ensure supported size */
2425         switch (kreg->enable_size) {
2426         case 4:
2427                 /* 32-bit */
2428                 break;
2429 #if BITS_PER_LONG >= 64
2430         case 8:
2431                 /* 64-bit */
2432                 break;
2433 #endif
2434         default:
2435                 return -EINVAL;
2436         }
2437 
2438         /* Ensure natural alignment */
2439         if (kreg->enable_addr % kreg->enable_size)
2440                 return -EINVAL;
2441 
2442         /* Ensure bit range for size */
2443         if (kreg->enable_bit > (kreg->enable_size * BITS_PER_BYTE) - 1)
2444                 return -EINVAL;
2445 
2446         /* Ensure accessible */
2447         if (!access_ok((const void __user *)(uintptr_t)kreg->enable_addr,
2448                        kreg->enable_size))
2449                 return -EFAULT;
2450 
2451         kreg->size = size;
2452 
2453         return 0;
2454 }
2455 
2456 /*
2457  * Registers a user_event on behalf of a user process.
2458  */
2459 static long user_events_ioctl_reg(struct user_event_file_info *info,
2460                                   unsigned long uarg)
2461 {
2462         struct user_reg __user *ureg = (struct user_reg __user *)uarg;
2463         struct user_reg reg;
2464         struct user_event *user;
2465         struct user_event_enabler *enabler;
2466         char *name;
2467         long ret;
2468         int write_result;
2469 
2470         ret = user_reg_get(ureg, &reg);
2471 
2472         if (ret)
2473                 return ret;
2474 
2475         /*
2476          * Prevent users from using the same address and bit multiple times
2477          * within the same mm address space. This can cause unexpected behavior
2478          * for user processes that is far easier to debug if this is explicitly
2479          * an error upon registering.
2480          */
2481         if (current_user_event_enabler_exists((unsigned long)reg.enable_addr,
2482                                               reg.enable_bit))
2483                 return -EADDRINUSE;
2484 
2485         name = strndup_user((const char __user *)(uintptr_t)reg.name_args,
2486                             MAX_EVENT_DESC);
2487 
2488         if (IS_ERR(name)) {
2489                 ret = PTR_ERR(name);
2490                 return ret;
2491         }
2492 
2493         ret = user_event_parse_cmd(info->group, name, &user, reg.flags);
2494 
2495         if (ret) {
2496                 kfree(name);
2497                 return ret;
2498         }
2499 
2500         ret = user_events_ref_add(info, user);
2501 
2502         /* No longer need parse ref, ref_add either worked or not */
2503         user_event_put(user, false);
2504 
2505         /* A non-negative return value is a valid index */
2506         if (ret < 0)
2507                 return ret;
2508 
2509         /*
2510          * user_events_ref_add succeeded:
2511          * At this point we have a user_event; its lifetime is bound by the
2512          * reference count, not this file. If anything fails, the user_event
2513          * still has a reference until the file is released. During release
2514          * any remaining references (from user_events_ref_add) are decremented.
2515          *
2516          * Attempt to create an enabler, whose lifetime is tied to the event
2517          * in the same way. Once the task that caused the enabler to be
2518          * created exits or issues exec() then the enablers it has created
2519          * will be destroyed and the ref to the event will be decremented.
2520          */
2521         enabler = user_event_enabler_create(&reg, user, &write_result);
2522 
2523         if (!enabler)
2524                 return -ENOMEM;
2525 
2526         /* Write failed/faulted, give error back to caller */
2527         if (write_result)
2528                 return write_result;
2529 
2530         put_user((u32)ret, &ureg->write_index);
2531 
2532         return 0;
2533 }
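/*
 * Illustrative user-space sketch of the registration this handler
 * implements (names hypothetical; assumes <linux/user_events.h> and an
 * fd opened on user_events_data):
 *
 *	struct user_reg reg = {0};
 *	__u32 enabled = 0;			// word the kernel updates
 *
 *	reg.size = sizeof(reg);
 *	reg.enable_bit = 31;			// bit set while enabled
 *	reg.enable_size = sizeof(enabled);	// must be 4 (or 8 on 64-bit)
 *	reg.enable_addr = (__u64)(uintptr_t)&enabled;
 *	reg.name_args = (__u64)(uintptr_t)"mytest u32 count";
 *
 *	if (ioctl(fd, DIAG_IOCSREG, &reg) == -1)
 *		... handle the error ...
 *
 * On success reg.write_index holds the index to prefix onto writes.
 */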
2534 
2535 /*
2536  * Deletes a user_event on behalf of a user process.
2537  */
2538 static long user_events_ioctl_del(struct user_event_file_info *info,
2539                                   unsigned long uarg)
2540 {
2541         void __user *ubuf = (void __user *)uarg;
2542         char *name;
2543         long ret;
2544 
2545         name = strndup_user(ubuf, MAX_EVENT_DESC);
2546 
2547         if (IS_ERR(name))
2548                 return PTR_ERR(name);
2549 
2550         /* event_mutex prevents dyn_event from racing */
2551         mutex_lock(&event_mutex);
2552         ret = delete_user_event(info->group, name);
2553         mutex_unlock(&event_mutex);
2554 
2555         kfree(name);
2556 
2557         return ret;
2558 }
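/*
 * For illustration: unlike DIAG_IOCSREG, this ioctl takes the event name
 * string directly rather than a struct, e.g. for a hypothetical "mytest"
 * event:
 *
 *	ioctl(fd, DIAG_IOCSDEL, "mytest");
 */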
2559 
2560 static long user_unreg_get(struct user_unreg __user *ureg,
2561                            struct user_unreg *kreg)
2562 {
2563         u32 size;
2564         long ret;
2565 
2566         ret = get_user(size, &ureg->size);
2567 
2568         if (ret)
2569                 return ret;
2570 
2571         if (size > PAGE_SIZE)
2572                 return -E2BIG;
2573 
2574         if (size < offsetofend(struct user_unreg, disable_addr))
2575                 return -EINVAL;
2576 
2577         ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
2578 
2579         /* Ensure no reserved values, since we don't support any yet */
2580         if (kreg->__reserved || kreg->__reserved2)
2581                 return -EINVAL;
2582 
2583         return ret;
2584 }
2585 
2586 static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
2587                                    unsigned long uaddr, unsigned char bit,
2588                                    unsigned long flags)
2589 {
2590         struct user_event_enabler enabler;
2591         int result;
2592         int attempt = 0;
2593 
2594         memset(&enabler, 0, sizeof(enabler));
2595         enabler.addr = uaddr;
2596         enabler.values = bit | flags;
2597 retry:
2598         /* Prevents state changes from racing with new enablers */
2599         mutex_lock(&event_mutex);
2600 
2601         /* Force the bit to be cleared, since no event is attached */
2602         mmap_read_lock(user_mm->mm);
2603         result = user_event_enabler_write(user_mm, &enabler, false, &attempt);
2604         mmap_read_unlock(user_mm->mm);
2605 
2606         mutex_unlock(&event_mutex);
2607 
2608         if (result) {
2609                 /* Attempt to fault-in and retry if it worked */
2610                 if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
2611                         goto retry;
2612         }
2613 
2614         return result;
2615 }
2616 
2617 /*
2618  * Unregisters an enablement address/bit within a task/user mm.
2619  */
2620 static long user_events_ioctl_unreg(unsigned long uarg)
2621 {
2622         struct user_unreg __user *ureg = (struct user_unreg __user *)uarg;
2623         struct user_event_mm *mm = current->user_event_mm;
2624         struct user_event_enabler *enabler, *next;
2625         struct user_unreg reg;
2626         unsigned long flags;
2627         long ret;
2628 
2629         ret = user_unreg_get(ureg, &reg);
2630 
2631         if (ret)
2632                 return ret;
2633 
2634         if (!mm)
2635                 return -ENOENT;
2636 
2637         flags = 0;
2638         ret = -ENOENT;
2639 
2640         /*
2641          * The freeing and faulting flags are used to indicate if the enabler
2642          * is in use at all. When faulting is set, a page-fault is occurring
2643          * asynchronously. If freeing is set during the async fault, the enabler
2644          * will be destroyed then. If no async fault is happening, we can destroy
2645          * it now since we hold the event_mutex during these checks.
2646          */
2647         mutex_lock(&event_mutex);
2648 
2649         list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) {
2650                 if (enabler->addr == reg.disable_addr &&
2651                     ENABLE_BIT(enabler) == reg.disable_bit) {
2652                         set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));
2653 
2654                         /* We must keep compat flags for the clear */
2655                         flags |= enabler->values & ENABLE_VAL_COMPAT_MASK;
2656 
2657                         if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
2658                                 user_event_enabler_destroy(enabler, true);
2659 
2660                         /* Removed at least one */
2661                         ret = 0;
2662                 }
2663         }
2664 
2665         mutex_unlock(&event_mutex);
2666 
2667         /* Ensure bit is now cleared for user, regardless of event status */
2668         if (!ret)
2669                 ret = user_event_mm_clear_bit(mm, reg.disable_addr,
2670                                               reg.disable_bit, flags);
2671 
2672         return ret;
2673 }
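/*
 * Illustrative user-space counterpart to the unregister path above (names
 * hypothetical; "enabled" is the same enable word used at registration):
 *
 *	struct user_unreg unreg = {0};
 *
 *	unreg.size = sizeof(unreg);
 *	unreg.disable_bit = 31;
 *	unreg.disable_addr = (__u64)(uintptr_t)&enabled;
 *
 *	ioctl(fd, DIAG_IOCSUNREG, &unreg);
 */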
2674 
2675 /*
2676  * Handles the ioctl from user mode to register or alter operations.
2677  */
2678 static long user_events_ioctl(struct file *file, unsigned int cmd,
2679                               unsigned long uarg)
2680 {
2681         struct user_event_file_info *info = file->private_data;
2682         struct user_event_group *group = info->group;
2683         long ret = -ENOTTY;
2684 
2685         switch (cmd) {
2686         case DIAG_IOCSREG:
2687                 mutex_lock(&group->reg_mutex);
2688                 ret = user_events_ioctl_reg(info, uarg);
2689                 mutex_unlock(&group->reg_mutex);
2690                 break;
2691 
2692         case DIAG_IOCSDEL:
2693                 mutex_lock(&group->reg_mutex);
2694                 ret = user_events_ioctl_del(info, uarg);
2695                 mutex_unlock(&group->reg_mutex);
2696                 break;
2697 
2698         case DIAG_IOCSUNREG:
2699                 mutex_lock(&group->reg_mutex);
2700                 ret = user_events_ioctl_unreg(uarg);
2701                 mutex_unlock(&group->reg_mutex);
2702                 break;
2703         }
2704 
2705         return ret;
2706 }
2707 
2708 /*
2709  * Handles the final close of the file from user mode.
2710  */
2711 static int user_events_release(struct inode *node, struct file *file)
2712 {
2713         struct user_event_file_info *info = file->private_data;
2714         struct user_event_group *group;
2715         struct user_event_refs *refs;
2716         int i;
2717 
2718         if (!info)
2719                 return -EINVAL;
2720 
2721         group = info->group;
2722 
2723         /*
2724          * Ensure refs cannot change under any circumstance by taking the
2725          * register mutex during the final freeing of the references.
2726          */
2727         mutex_lock(&group->reg_mutex);
2728 
2729         refs = info->refs;
2730 
2731         if (!refs)
2732                 goto out;
2733 
2734         /*
2735          * The lifetime of refs has reached an end; it's tied to this file.
2736          * The underlying user_events are ref counted and cannot be freed here.
2737          * After this decrement, the user_events may be freed elsewhere.
2738          */
2739         for (i = 0; i < refs->count; ++i)
2740                 user_event_put(refs->events[i], false);
2741 
2742 out:
2743         file->private_data = NULL;
2744 
2745         mutex_unlock(&group->reg_mutex);
2746 
2747         kfree(refs);
2748         kfree(info);
2749 
2750         return 0;
2751 }
2752 
2753 static const struct file_operations user_data_fops = {
2754         .open           = user_events_open,
2755         .write          = user_events_write,
2756         .write_iter     = user_events_write_iter,
2757         .unlocked_ioctl = user_events_ioctl,
2758         .release        = user_events_release,
2759 };
2760 
2761 static void *user_seq_start(struct seq_file *m, loff_t *pos)
2762 {
2763         if (*pos)
2764                 return NULL;
2765 
2766         return (void *)1;
2767 }
2768 
2769 static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos)
2770 {
2771         ++*pos;
2772         return NULL;
2773 }
2774 
2775 static void user_seq_stop(struct seq_file *m, void *p)
2776 {
2777 }
2778 
2779 static int user_seq_show(struct seq_file *m, void *p)
2780 {
2781         struct user_event_group *group = m->private;
2782         struct user_event *user;
2783         char status;
2784         int i, active = 0, busy = 0;
2785 
2786         if (!group)
2787                 return -EINVAL;
2788 
2789         mutex_lock(&group->reg_mutex);
2790 
2791         hash_for_each(group->register_table, i, user, node) {
2792                 status = user->status;
2793 
2794                 seq_printf(m, "%s", EVENT_TP_NAME(user));
2795 
2796                 if (status != 0)
2797                         seq_puts(m, " #");
2798 
2799                 if (status != 0) {
2800                         seq_puts(m, " Used by");
2801                         if (status & EVENT_STATUS_FTRACE)
2802                                 seq_puts(m, " ftrace");
2803                         if (status & EVENT_STATUS_PERF)
2804                                 seq_puts(m, " perf");
2805                         if (status & EVENT_STATUS_OTHER)
2806                                 seq_puts(m, " other");
2807                         busy++;
2808                 }
2809 
2810                 seq_puts(m, "\n");
2811                 active++;
2812         }
2813 
2814         mutex_unlock(&group->reg_mutex);
2815 
2816         seq_puts(m, "\n");
2817         seq_printf(m, "Active: %d\n", active);
2818         seq_printf(m, "Busy: %d\n", busy);
2819 
2820         return 0;
2821 }
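/*
 * For illustration, given the seq operations above the status file output
 * looks roughly like the following (event name and counts hypothetical):
 *
 *	mytest # Used by ftrace
 *
 *	Active: 1
 *	Busy: 1
 */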
2822 
2823 static const struct seq_operations user_seq_ops = {
2824         .start  = user_seq_start,
2825         .next   = user_seq_next,
2826         .stop   = user_seq_stop,
2827         .show   = user_seq_show,
2828 };
2829 
2830 static int user_status_open(struct inode *node, struct file *file)
2831 {
2832         struct user_event_group *group;
2833         int ret;
2834 
2835         group = current_user_event_group();
2836 
2837         if (!group)
2838                 return -ENOENT;
2839 
2840         ret = seq_open(file, &user_seq_ops);
2841 
2842         if (!ret) {
2843                 /* Chain group to seq_file */
2844                 struct seq_file *m = file->private_data;
2845 
2846                 m->private = group;
2847         }
2848 
2849         return ret;
2850 }
2851 
2852 static const struct file_operations user_status_fops = {
2853         .open           = user_status_open,
2854         .read           = seq_read,
2855         .llseek         = seq_lseek,
2856         .release        = seq_release,
2857 };
2858 
2859 /*
2860  * Creates a set of tracefs files to allow user mode interactions.
2861  */
2862 static int create_user_tracefs(void)
2863 {
2864         struct dentry *edata, *emmap;
2865 
2866         edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
2867                                     NULL, NULL, &user_data_fops);
2868 
2869         if (!edata) {
2870                 pr_warn("Could not create tracefs 'user_events_data' entry\n");
2871                 goto err;
2872         }
2873 
2874         emmap = tracefs_create_file("user_events_status", TRACE_MODE_READ,
2875                                     NULL, NULL, &user_status_fops);
2876 
2877         if (!emmap) {
2878                 tracefs_remove(edata);
2879                 pr_warn("Could not create tracefs 'user_events_status' entry\n");
2880                 goto err;
2881         }
2882 
2883         return 0;
2884 err:
2885         return -ENODEV;
2886 }
2887 
2888 static int set_max_user_events_sysctl(const struct ctl_table *table, int write,
2889                                       void *buffer, size_t *lenp, loff_t *ppos)
2890 {
2891         int ret;
2892 
2893         mutex_lock(&event_mutex);
2894 
2895         ret = proc_douintvec(table, write, buffer, lenp, ppos);
2896 
2897         mutex_unlock(&event_mutex);
2898 
2899         return ret;
2900 }
2901 
2902 static struct ctl_table user_event_sysctls[] = {
2903         {
2904                 .procname       = "user_events_max",
2905                 .data           = &max_user_events,
2906                 .maxlen         = sizeof(unsigned int),
2907                 .mode           = 0644,
2908                 .proc_handler   = set_max_user_events_sysctl,
2909         },
2910 };
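/*
 * Illustrative usage: the limit checked against current_user_events during
 * registration can be adjusted at runtime, e.g. from a shell:
 *
 *	echo 256 > /proc/sys/kernel/user_events_max
 */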
2911 
2912 static int __init trace_events_user_init(void)
2913 {
2914         int ret;
2915 
2916         fault_cache = KMEM_CACHE(user_event_enabler_fault, 0);
2917 
2918         if (!fault_cache)
2919                 return -ENOMEM;
2920 
2921         init_group = user_event_group_create();
2922 
2923         if (!init_group) {
2924                 kmem_cache_destroy(fault_cache);
2925                 return -ENOMEM;
2926         }
2927 
2928         ret = create_user_tracefs();
2929 
2930         if (ret) {
2931                 pr_warn("user_events could not register with tracefs\n");
2932                 user_event_group_destroy(init_group);
2933                 kmem_cache_destroy(fault_cache);
2934                 init_group = NULL;
2935                 return ret;
2936         }
2937 
2938         if (dyn_event_register(&user_event_dops))
2939                 pr_warn("user_events could not register with dyn_events\n");
2940 
2941         register_sysctl_init("kernel", user_event_sysctls);
2942 
2943         return 0;
2944 }
2945 
2946 fs_initcall(trace_events_user_init);
2947 
