
TOMOYO Linux Cross Reference
Linux/kernel/kcov.c

  1 // SPDX-License-Identifier: GPL-2.0
  2 #define pr_fmt(fmt) "kcov: " fmt
  3 
  4 #define DISABLE_BRANCH_PROFILING
  5 #include <linux/atomic.h>
  6 #include <linux/compiler.h>
  7 #include <linux/errno.h>
  8 #include <linux/export.h>
  9 #include <linux/types.h>
 10 #include <linux/file.h>
 11 #include <linux/fs.h>
 12 #include <linux/hashtable.h>
 13 #include <linux/init.h>
 14 #include <linux/kmsan-checks.h>
 15 #include <linux/mm.h>
 16 #include <linux/preempt.h>
 17 #include <linux/printk.h>
 18 #include <linux/sched.h>
 19 #include <linux/slab.h>
 20 #include <linux/spinlock.h>
 21 #include <linux/vmalloc.h>
 22 #include <linux/debugfs.h>
 23 #include <linux/uaccess.h>
 24 #include <linux/kcov.h>
 25 #include <linux/refcount.h>
 26 #include <linux/log2.h>
 27 #include <asm/setup.h>
 28 
 29 #define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
 30 
 31 /* Number of 64-bit words written per comparison: */
 32 #define KCOV_WORDS_PER_CMP 4
 33 
 34 /*
 35  * kcov descriptor (one per opened debugfs file).
 36  * State transitions of the descriptor:
 37  *  - initial state after open()
 38  *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 39  *  - then, mmap() call (several calls are allowed but not useful)
 40  *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 41  *      KCOV_TRACE_PC - to trace only the PCs
 42  *      or
 43  *      KCOV_TRACE_CMP - to trace only the comparison operands
 44  *  - then, ioctl(KCOV_DISABLE) to disable the task.
 45  * Enabling/disabling ioctls can be repeated (only one task at a time is allowed).
 46  */
 47 struct kcov {
 48         /*
 49          * Reference counter. We keep one for:
 50          *  - opened file descriptor
 51          *  - task with enabled coverage (we can't unwire it from another task)
 52          *  - each code section for remote coverage collection
 53          */
 54         refcount_t              refcount;
 55         /* The lock protects mode, size, area and t. */
 56         spinlock_t              lock;
 57         enum kcov_mode          mode;
 59         /* Size of arena (in longs). */
 59         unsigned int            size;
 60         /* Coverage buffer shared with user space. */
 61         void                    *area;
 62         /* Task for which we collect coverage, or NULL. */
 63         struct task_struct      *t;
 64         /* Collecting coverage from remote (background) threads. */
 65         bool                    remote;
 67         /* Size of remote area (in longs). */
 67         unsigned int            remote_size;
 68         /*
 69          * Sequence is incremented each time kcov is re-enabled; it is used by
 70          * kcov_remote_stop(), see the comment there.
 71          */
 72         int                     sequence;
 73 };
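
/*
 * Editor's note: a minimal userspace sketch of the open() -> KCOV_INIT_TRACE
 * -> mmap() -> KCOV_ENABLE -> KCOV_DISABLE state machine described above,
 * following Documentation/dev-tools/kcov.rst. The KCOV_* constants come from
 * the uapi <linux/kcov.h>; COVER_SIZE is an arbitrary buffer size (in
 * unsigned longs) and error handling is omitted for brevity.
 */
#include <fcntl.h>
#include <linux/kcov.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#define COVER_SIZE	(64 << 10)

int main(void)
{
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
	unsigned long *cover, n, i;

	/* A single KCOV_INIT_TRACE call sets the buffer size. */
	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
	/* Map the coverage buffer shared with the kernel. */
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	/* Enable PC tracing for the current task only. */
	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
	/* Reset the coverage count right before the syscall under test. */
	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
	read(-1, NULL, 0);
	/* cover[0] holds the number of PCs collected after it. */
	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);
	ioctl(fd, KCOV_DISABLE, 0);
	return 0;
}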
 74 
 75 struct kcov_remote_area {
 76         struct list_head        list;
 77         unsigned int            size;
 78 };
 79 
 80 struct kcov_remote {
 81         u64                     handle;
 82         struct kcov             *kcov;
 83         struct hlist_node       hnode;
 84 };
 85 
 86 static DEFINE_SPINLOCK(kcov_remote_lock);
 87 static DEFINE_HASHTABLE(kcov_remote_map, 4);
 88 static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);
 89 
 90 struct kcov_percpu_data {
 91         void                    *irq_area;
 92         local_lock_t            lock;
 93 
 94         unsigned int            saved_mode;
 95         unsigned int            saved_size;
 96         void                    *saved_area;
 97         struct kcov             *saved_kcov;
 98         int                     saved_sequence;
 99 };
100 
101 static DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data) = {
102         .lock = INIT_LOCAL_LOCK(lock),
103 };
104 
105 /* Must be called with kcov_remote_lock locked. */
106 static struct kcov_remote *kcov_remote_find(u64 handle)
107 {
108         struct kcov_remote *remote;
109 
110         hash_for_each_possible(kcov_remote_map, remote, hnode, handle) {
111                 if (remote->handle == handle)
112                         return remote;
113         }
114         return NULL;
115 }
116 
117 /* Must be called with kcov_remote_lock locked. */
118 static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle)
119 {
120         struct kcov_remote *remote;
121 
122         if (kcov_remote_find(handle))
123                 return ERR_PTR(-EEXIST);
124         remote = kmalloc(sizeof(*remote), GFP_ATOMIC);
125         if (!remote)
126                 return ERR_PTR(-ENOMEM);
127         remote->handle = handle;
128         remote->kcov = kcov;
129         hash_add(kcov_remote_map, &remote->hnode, handle);
130         return remote;
131 }
132 
133 /* Must be called with kcov_remote_lock locked. */
134 static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
135 {
136         struct kcov_remote_area *area;
137         struct list_head *pos;
138 
139         list_for_each(pos, &kcov_remote_areas) {
140                 area = list_entry(pos, struct kcov_remote_area, list);
141                 if (area->size == size) {
142                         list_del(&area->list);
143                         return area;
144                 }
145         }
146         return NULL;
147 }
148 
149 /* Must be called with kcov_remote_lock locked. */
150 static void kcov_remote_area_put(struct kcov_remote_area *area,
151                                         unsigned int size)
152 {
153         INIT_LIST_HEAD(&area->list);
154         area->size = size;
155         list_add(&area->list, &kcov_remote_areas);
156         /*
157          * KMSAN doesn't instrument this file, so it may not know area->list
158          * is initialized. Unpoison it explicitly to avoid reports in
159          * kcov_remote_area_get().
160          */
161         kmsan_unpoison_memory(&area->list, sizeof(area->list));
162 }
163 
164 /*
165  * Unlike in_serving_softirq(), this function returns false when called during
166  * a hardirq or an NMI that happened in the softirq context.
167  */
168 static inline bool in_softirq_really(void)
169 {
170         return in_serving_softirq() && !in_hardirq() && !in_nmi();
171 }
172 
173 static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
174 {
175         unsigned int mode;
176 
177         /*
178          * We are interested in code coverage as a function of syscall inputs,
179          * so we ignore code executed in interrupts, unless we are in a remote
180          * coverage collection section in a softirq.
181          */
182         if (!in_task() && !(in_softirq_really() && t->kcov_softirq))
183                 return false;
184         mode = READ_ONCE(t->kcov_mode);
185         /*
186          * There is some code that runs in interrupts but for which
187          * in_interrupt() returns false (e.g. preempt_schedule_irq()).
188          * READ_ONCE()/barrier() effectively provides load-acquire wrt
189          * interrupts; there is a paired barrier()/WRITE_ONCE() in
190          * kcov_start().
191          */
192         barrier();
193         return mode == needed_mode;
194 }
195 
196 static notrace unsigned long canonicalize_ip(unsigned long ip)
197 {
198 #ifdef CONFIG_RANDOMIZE_BASE
199         ip -= kaslr_offset();
200 #endif
201         return ip;
202 }
203 
204 /*
205  * Entry point from instrumented code.
206  * This is called once per basic-block/edge.
207  */
208 void notrace __sanitizer_cov_trace_pc(void)
209 {
210         struct task_struct *t;
211         unsigned long *area;
212         unsigned long ip = canonicalize_ip(_RET_IP_);
213         unsigned long pos;
214 
215         t = current;
216         if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
217                 return;
218 
219         area = t->kcov_area;
220         /* The first 64-bit word is the number of subsequent PCs. */
221         pos = READ_ONCE(area[0]) + 1;
222         if (likely(pos < t->kcov_size)) {
223         /* Previously we wrote the pc before updating pos. However, some
224          * early interrupt code could bypass the check_kcov_mode() check
225          * and invoke __sanitizer_cov_trace_pc(). If such an interrupt is
226          * raised between writing the pc and updating pos, the pc could be
227          * overwritten by the recursive __sanitizer_cov_trace_pc() call.
228          * Update pos before writing the pc to avoid such interleaving.
229          */
230                 WRITE_ONCE(area[0], pos);
231                 barrier();
232                 area[pos] = ip;
233         }
234 }
235 EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
236 
237 #ifdef CONFIG_KCOV_ENABLE_COMPARISONS
238 static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
239 {
240         struct task_struct *t;
241         u64 *area;
242         u64 count, start_index, end_pos, max_pos;
243 
244         t = current;
245         if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
246                 return;
247 
248         ip = canonicalize_ip(ip);
249 
250         /*
251          * We write all comparison arguments and types as u64.
252          * The buffer was allocated for t->kcov_size unsigned longs.
253          */
254         area = (u64 *)t->kcov_area;
255         max_pos = t->kcov_size * sizeof(unsigned long);
256 
257         count = READ_ONCE(area[0]);
258 
259         /* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
260         start_index = 1 + count * KCOV_WORDS_PER_CMP;
261         end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
262         if (likely(end_pos <= max_pos)) {
263                 /* See comment in __sanitizer_cov_trace_pc(). */
264                 WRITE_ONCE(area[0], count + 1);
265                 barrier();
266                 area[start_index] = type;
267                 area[start_index + 1] = arg1;
268                 area[start_index + 2] = arg2;
269                 area[start_index + 3] = ip;
270         }
271 }
272 
273 void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
274 {
275         write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
276 }
277 EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);
278 
279 void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
280 {
281         write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
282 }
283 EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);
284 
285 void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
286 {
287         write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
288 }
289 EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);
290 
291 void notrace __sanitizer_cov_trace_cmp8(kcov_u64 arg1, kcov_u64 arg2)
292 {
293         write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
294 }
295 EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);
296 
297 void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
298 {
299         write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
300                         _RET_IP_);
301 }
302 EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);
303 
304 void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
305 {
306         write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
307                         _RET_IP_);
308 }
309 EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);
310 
311 void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
312 {
313         write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
314                         _RET_IP_);
315 }
316 EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);
317 
318 void notrace __sanitizer_cov_trace_const_cmp8(kcov_u64 arg1, kcov_u64 arg2)
319 {
320         write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
321                         _RET_IP_);
322 }
323 EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);
324 
325 void notrace __sanitizer_cov_trace_switch(kcov_u64 val, void *arg)
326 {
327         u64 i;
328         u64 *cases = arg;
329         u64 count = cases[0];
330         u64 size = cases[1];
331         u64 type = KCOV_CMP_CONST;
332 
333         switch (size) {
334         case 8:
335                 type |= KCOV_CMP_SIZE(0);
336                 break;
337         case 16:
338                 type |= KCOV_CMP_SIZE(1);
339                 break;
340         case 32:
341                 type |= KCOV_CMP_SIZE(2);
342                 break;
343         case 64:
344                 type |= KCOV_CMP_SIZE(3);
345                 break;
346         default:
347                 return;
348         }
349         for (i = 0; i < count; i++)
350                 write_comp_data(type, cases[i + 2], val, _RET_IP_);
351 }
352 EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
353 #endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */
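
/*
 * Editor's note: a sketch of decoding the comparison buffer in userspace,
 * under the layout implemented by write_comp_data() above: area[0] is the
 * record count and every record is KCOV_WORDS_PER_CMP (four) 64-bit words
 * holding type, arg1, arg2 and ip. KCOV_CMP_CONST and KCOV_CMP_MASK come
 * from the uapi <linux/kcov.h>; the buffer is assumed to have been mmap()ed
 * and enabled with KCOV_TRACE_CMP as in the earlier sketch.
 */
#include <linux/kcov.h>
#include <stdio.h>

static void dump_cmps(const void *buf)
{
	const unsigned long long *area = buf;
	unsigned long long i, type, n = area[0];

	for (i = 0; i < n; i++) {
		type = area[1 + i * 4];
		/* Operand width in bytes is 1 << ((type & KCOV_CMP_MASK) >> 1). */
		printf("ip 0x%llx: %llu-byte cmp, 0x%llx vs 0x%llx%s\n",
		       area[1 + i * 4 + 3],
		       1ULL << ((type & KCOV_CMP_MASK) >> 1),
		       area[1 + i * 4 + 1], area[1 + i * 4 + 2],
		       (type & KCOV_CMP_CONST) ? " (const operand)" : "");
	}
}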
354 
355 static void kcov_start(struct task_struct *t, struct kcov *kcov,
356                         unsigned int size, void *area, enum kcov_mode mode,
357                         int sequence)
358 {
359         kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
360         t->kcov = kcov;
361         /* Cache in task struct for performance. */
362         t->kcov_size = size;
363         t->kcov_area = area;
364         t->kcov_sequence = sequence;
365         /* See comment in check_kcov_mode(). */
366         barrier();
367         WRITE_ONCE(t->kcov_mode, mode);
368 }
369 
370 static void kcov_stop(struct task_struct *t)
371 {
372         WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
373         barrier();
374         t->kcov = NULL;
375         t->kcov_size = 0;
376         t->kcov_area = NULL;
377 }
378 
379 static void kcov_task_reset(struct task_struct *t)
380 {
381         kcov_stop(t);
382         t->kcov_sequence = 0;
383         t->kcov_handle = 0;
384 }
385 
386 void kcov_task_init(struct task_struct *t)
387 {
388         kcov_task_reset(t);
389         t->kcov_handle = current->kcov_handle;
390 }
391 
392 static void kcov_reset(struct kcov *kcov)
393 {
394         kcov->t = NULL;
395         kcov->mode = KCOV_MODE_INIT;
396         kcov->remote = false;
397         kcov->remote_size = 0;
398         kcov->sequence++;
399 }
400 
401 static void kcov_remote_reset(struct kcov *kcov)
402 {
403         int bkt;
404         struct kcov_remote *remote;
405         struct hlist_node *tmp;
406         unsigned long flags;
407 
408         spin_lock_irqsave(&kcov_remote_lock, flags);
409         hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
410                 if (remote->kcov != kcov)
411                         continue;
412                 hash_del(&remote->hnode);
413                 kfree(remote);
414         }
415         /* Do reset before unlock to prevent races with kcov_remote_start(). */
416         kcov_reset(kcov);
417         spin_unlock_irqrestore(&kcov_remote_lock, flags);
418 }
419 
420 static void kcov_disable(struct task_struct *t, struct kcov *kcov)
421 {
422         kcov_task_reset(t);
423         if (kcov->remote)
424                 kcov_remote_reset(kcov);
425         else
426                 kcov_reset(kcov);
427 }
428 
429 static void kcov_get(struct kcov *kcov)
430 {
431         refcount_inc(&kcov->refcount);
432 }
433 
434 static void kcov_put(struct kcov *kcov)
435 {
436         if (refcount_dec_and_test(&kcov->refcount)) {
437                 kcov_remote_reset(kcov);
438                 vfree(kcov->area);
439                 kfree(kcov);
440         }
441 }
442 
443 void kcov_task_exit(struct task_struct *t)
444 {
445         struct kcov *kcov;
446         unsigned long flags;
447 
448         kcov = t->kcov;
449         if (kcov == NULL)
450                 return;
451 
452         spin_lock_irqsave(&kcov->lock, flags);
453         kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);
454         /*
455          * For KCOV_ENABLE devices we want to make sure that t->kcov->t == t,
456          * which comes down to:
457          *        WARN_ON(!kcov->remote && kcov->t != t);
458          *
459          * For KCOV_REMOTE_ENABLE devices, the exiting task is either:
460          *
461          * 1. A remote task between kcov_remote_start() and kcov_remote_stop().
462          *    In this case we should print a warning right away, since a task
463          *    shouldn't be exiting when it's in a kcov coverage collection
464          *    section. Here t points to the task that is collecting remote
465          *    coverage, and t->kcov->t points to the thread that created the
466          *    kcov device. Which means that to detect this case we need to
467          *    check that t != t->kcov->t, and this gives us the following:
468          *        WARN_ON(kcov->remote && kcov->t != t);
469          *
470          * 2. The task that created kcov exiting without calling KCOV_DISABLE,
471          *    and then again we make sure that t->kcov->t == t:
472          *        WARN_ON(kcov->remote && kcov->t != t);
473          *
474          * By combining all three checks into one we get:
475          */
476         if (WARN_ON(kcov->t != t)) {
477                 spin_unlock_irqrestore(&kcov->lock, flags);
478                 return;
479         }
480         /* Just to not leave dangling references behind. */
481         kcov_disable(t, kcov);
482         spin_unlock_irqrestore(&kcov->lock, flags);
483         kcov_put(kcov);
484 }
485 
486 static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
487 {
488         int res = 0;
489         struct kcov *kcov = vma->vm_file->private_data;
490         unsigned long size, off;
491         struct page *page;
492         unsigned long flags;
493 
494         spin_lock_irqsave(&kcov->lock, flags);
495         size = kcov->size * sizeof(unsigned long);
496         if (kcov->area == NULL || vma->vm_pgoff != 0 ||
497             vma->vm_end - vma->vm_start != size) {
498                 res = -EINVAL;
499                 goto exit;
500         }
501         spin_unlock_irqrestore(&kcov->lock, flags);
502         vm_flags_set(vma, VM_DONTEXPAND);
503         for (off = 0; off < size; off += PAGE_SIZE) {
504                 page = vmalloc_to_page(kcov->area + off);
505                 res = vm_insert_page(vma, vma->vm_start + off, page);
506                 if (res) {
507                         pr_warn_once("kcov: vm_insert_page() failed\n");
508                         return res;
509                 }
510         }
511         return 0;
512 exit:
513         spin_unlock_irqrestore(&kcov->lock, flags);
514         return res;
515 }
516 
517 static int kcov_open(struct inode *inode, struct file *filep)
518 {
519         struct kcov *kcov;
520 
521         kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
522         if (!kcov)
523                 return -ENOMEM;
524         kcov->mode = KCOV_MODE_DISABLED;
525         kcov->sequence = 1;
526         refcount_set(&kcov->refcount, 1);
527         spin_lock_init(&kcov->lock);
528         filep->private_data = kcov;
529         return nonseekable_open(inode, filep);
530 }
531 
532 static int kcov_close(struct inode *inode, struct file *filep)
533 {
534         kcov_put(filep->private_data);
535         return 0;
536 }
537 
538 static int kcov_get_mode(unsigned long arg)
539 {
540         if (arg == KCOV_TRACE_PC)
541                 return KCOV_MODE_TRACE_PC;
542         else if (arg == KCOV_TRACE_CMP)
543 #ifdef CONFIG_KCOV_ENABLE_COMPARISONS
544                 return KCOV_MODE_TRACE_CMP;
545 #else
546                 return -ENOTSUPP;
547 #endif
548         else
549                 return -EINVAL;
550 }
551 
552 /*
553  * Fault in a lazily-faulted vmalloc area before it can be used by
554  * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
555  * vmalloc fault handling path is instrumented.
556  */
557 static void kcov_fault_in_area(struct kcov *kcov)
558 {
559         unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
560         unsigned long *area = kcov->area;
561         unsigned long offset;
562 
563         for (offset = 0; offset < kcov->size; offset += stride)
564                 READ_ONCE(area[offset]);
565 }
566 
567 static inline bool kcov_check_handle(u64 handle, bool common_valid,
568                                 bool uncommon_valid, bool zero_valid)
569 {
570         if (handle & ~(KCOV_SUBSYSTEM_MASK | KCOV_INSTANCE_MASK))
571                 return false;
572         switch (handle & KCOV_SUBSYSTEM_MASK) {
573         case KCOV_SUBSYSTEM_COMMON:
574                 return (handle & KCOV_INSTANCE_MASK) ?
575                         common_valid : zero_valid;
576         case KCOV_SUBSYSTEM_USB:
577                 return uncommon_valid;
578         default:
579                 return false;
580         }
581         return false;
582 }
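
/*
 * Editor's note: for reference, the handle layout validated above, as defined
 * in the uapi <linux/kcov.h>: the top byte selects the subsystem and the low
 * four bytes the instance id. kcov_check_handle() accepts a common handle
 * with a non-zero instance when common_valid is set, the bare
 * KCOV_SUBSYSTEM_COMMON value (instance zero) only when zero_valid is set,
 * and USB handles when uncommon_valid is set.
 */
#define KCOV_SUBSYSTEM_COMMON	(0x00ull << 56)
#define KCOV_SUBSYSTEM_USB	(0x01ull << 56)
#define KCOV_SUBSYSTEM_MASK	(0xffull << 56)
#define KCOV_INSTANCE_MASK	(0xffffffffull)

static inline __u64 kcov_remote_handle(__u64 subsys, __u64 inst)
{
	if (subsys & ~KCOV_SUBSYSTEM_MASK || inst & ~KCOV_INSTANCE_MASK)
		return 0;
	return subsys | inst;
}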
583 
584 static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
585                              unsigned long arg)
586 {
587         struct task_struct *t;
588         unsigned long flags, unused;
589         int mode, i;
590         struct kcov_remote_arg *remote_arg;
591         struct kcov_remote *remote;
592 
593         switch (cmd) {
594         case KCOV_ENABLE:
595                 /*
596                  * Enable coverage for the current task.
597                  * At this point the user must have enabled trace mode,
598                  * and mmapped the file. Coverage collection is disabled only
599                  * at task exit or voluntarily via KCOV_DISABLE. After that it
600                  * can be enabled for another task.
601                  */
602                 if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
603                         return -EINVAL;
604                 t = current;
605                 if (kcov->t != NULL || t->kcov != NULL)
606                         return -EBUSY;
607                 mode = kcov_get_mode(arg);
608                 if (mode < 0)
609                         return mode;
610                 kcov_fault_in_area(kcov);
611                 kcov->mode = mode;
612                 kcov_start(t, kcov, kcov->size, kcov->area, kcov->mode,
613                                 kcov->sequence);
614                 kcov->t = t;
615                 /* Put either in kcov_task_exit() or in KCOV_DISABLE. */
616                 kcov_get(kcov);
617                 return 0;
618         case KCOV_DISABLE:
619                 /* Disable coverage for the current task. */
620                 unused = arg;
621                 if (unused != 0 || current->kcov != kcov)
622                         return -EINVAL;
623                 t = current;
624                 if (WARN_ON(kcov->t != t))
625                         return -EINVAL;
626                 kcov_disable(t, kcov);
627                 kcov_put(kcov);
628                 return 0;
629         case KCOV_REMOTE_ENABLE:
630                 if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
631                         return -EINVAL;
632                 t = current;
633                 if (kcov->t != NULL || t->kcov != NULL)
634                         return -EBUSY;
635                 remote_arg = (struct kcov_remote_arg *)arg;
636                 mode = kcov_get_mode(remote_arg->trace_mode);
637                 if (mode < 0)
638                         return mode;
639                 if ((unsigned long)remote_arg->area_size >
640                     LONG_MAX / sizeof(unsigned long))
641                         return -EINVAL;
642                 kcov->mode = mode;
643                 t->kcov = kcov;
644                 t->kcov_mode = KCOV_MODE_REMOTE;
645                 kcov->t = t;
646                 kcov->remote = true;
647                 kcov->remote_size = remote_arg->area_size;
648                 spin_lock_irqsave(&kcov_remote_lock, flags);
649                 for (i = 0; i < remote_arg->num_handles; i++) {
650                         if (!kcov_check_handle(remote_arg->handles[i],
651                                                 false, true, false)) {
652                                 spin_unlock_irqrestore(&kcov_remote_lock,
653                                                         flags);
654                                 kcov_disable(t, kcov);
655                                 return -EINVAL;
656                         }
657                         remote = kcov_remote_add(kcov, remote_arg->handles[i]);
658                         if (IS_ERR(remote)) {
659                                 spin_unlock_irqrestore(&kcov_remote_lock,
660                                                         flags);
661                                 kcov_disable(t, kcov);
662                                 return PTR_ERR(remote);
663                         }
664                 }
665                 if (remote_arg->common_handle) {
666                         if (!kcov_check_handle(remote_arg->common_handle,
667                                                 true, false, false)) {
668                                 spin_unlock_irqrestore(&kcov_remote_lock,
669                                                         flags);
670                                 kcov_disable(t, kcov);
671                                 return -EINVAL;
672                         }
673                         remote = kcov_remote_add(kcov,
674                                         remote_arg->common_handle);
675                         if (IS_ERR(remote)) {
676                                 spin_unlock_irqrestore(&kcov_remote_lock,
677                                                         flags);
678                                 kcov_disable(t, kcov);
679                                 return PTR_ERR(remote);
680                         }
681                         t->kcov_handle = remote_arg->common_handle;
682                 }
683                 spin_unlock_irqrestore(&kcov_remote_lock, flags);
684                 /* Put either in kcov_task_exit() or in KCOV_DISABLE. */
685                 kcov_get(kcov);
686                 return 0;
687         default:
688                 return -ENOTTY;
689         }
690 }
691 
692 static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
693 {
694         struct kcov *kcov;
695         int res;
696         struct kcov_remote_arg *remote_arg = NULL;
697         unsigned int remote_num_handles;
698         unsigned long remote_arg_size;
699         unsigned long size, flags;
700         void *area;
701 
702         kcov = filep->private_data;
703         switch (cmd) {
704         case KCOV_INIT_TRACE:
705                 /*
706                  * Enable kcov in trace mode and set up the buffer size.
707                  * Must happen before anything else.
708                  *
709                  * First check the size argument - it must be at least 2
710                  * to hold the current position and one PC.
711                  */
712                 size = arg;
713                 if (size < 2 || size > INT_MAX / sizeof(unsigned long))
714                         return -EINVAL;
715                 area = vmalloc_user(size * sizeof(unsigned long));
716                 if (area == NULL)
717                         return -ENOMEM;
718                 spin_lock_irqsave(&kcov->lock, flags);
719                 if (kcov->mode != KCOV_MODE_DISABLED) {
720                         spin_unlock_irqrestore(&kcov->lock, flags);
721                         vfree(area);
722                         return -EBUSY;
723                 }
724                 kcov->area = area;
725                 kcov->size = size;
726                 kcov->mode = KCOV_MODE_INIT;
727                 spin_unlock_irqrestore(&kcov->lock, flags);
728                 return 0;
729         case KCOV_REMOTE_ENABLE:
730                 if (get_user(remote_num_handles, (unsigned __user *)(arg +
731                                 offsetof(struct kcov_remote_arg, num_handles))))
732                         return -EFAULT;
733                 if (remote_num_handles > KCOV_REMOTE_MAX_HANDLES)
734                         return -EINVAL;
735                 remote_arg_size = struct_size(remote_arg, handles,
736                                         remote_num_handles);
737                 remote_arg = memdup_user((void __user *)arg, remote_arg_size);
738                 if (IS_ERR(remote_arg))
739                         return PTR_ERR(remote_arg);
740                 if (remote_arg->num_handles != remote_num_handles) {
741                         kfree(remote_arg);
742                         return -EINVAL;
743                 }
744                 arg = (unsigned long)remote_arg;
745                 fallthrough;
746         default:
747                 /*
748                  * All other commands can be executed under a spin lock, so we
749                  * obtain and release it here to simplify kcov_ioctl_locked().
750                  */
751                 spin_lock_irqsave(&kcov->lock, flags);
752                 res = kcov_ioctl_locked(kcov, cmd, arg);
753                 spin_unlock_irqrestore(&kcov->lock, flags);
754                 kfree(remote_arg);
755                 return res;
756         }
757 }
758 
759 static const struct file_operations kcov_fops = {
760         .open           = kcov_open,
761         .unlocked_ioctl = kcov_ioctl,
762         .compat_ioctl   = kcov_ioctl,
763         .mmap           = kcov_mmap,
764         .release        = kcov_close,
765 };
766 
767 /*
768  * kcov_remote_start() and kcov_remote_stop() can be used to annotate a section
769  * of code in a kernel background thread or in a softirq to allow kcov to be
770  * used to collect coverage from that part of code.
771  *
772  * The handle argument of kcov_remote_start() identifies a code section that is
773  * used for coverage collection. A userspace process passes this handle to
774  * the KCOV_REMOTE_ENABLE ioctl to make the corresponding kcov device start
775  * collecting coverage for the code section identified by this handle.
776  *
777  * How these annotations are used in the kernel code depends on the type of
778  * the kernel thread whose code is being annotated.
779  *
780  * For global kernel threads that are spawned in a limited number of instances
781  * (e.g. one USB hub_event() worker thread is spawned per USB HCD) and for
782  * softirqs, each instance must be assigned a unique 4-byte instance id. The
783  * instance id is then combined with a 1-byte subsystem id to get a handle via
784  * kcov_remote_handle(subsystem_id, instance_id).
785  *
786  * For local kernel threads that are spawned from a system call handler when
787  * a user interacts with some kernel interface (e.g. vhost workers), a handle
788  * is passed from a userspace process as the common_handle field of the
789  * kcov_remote_arg struct (note that the user must generate a handle by using
790  * kcov_remote_handle() with KCOV_SUBSYSTEM_COMMON as the subsystem id and an
791  * arbitrary 4-byte non-zero number as the instance id). This common handle
792  * then gets saved into the task_struct of the process that issued the
793  * KCOV_REMOTE_ENABLE ioctl. When this process issues system calls that spawn
794  * kernel threads, the common handle must be retrieved via kcov_common_handle()
795  * and passed to the spawned threads via custom annotations. Those kernel
796  * threads must in turn be annotated with kcov_remote_start(common_handle) and
797  * kcov_remote_stop(). All of the threads that are spawned by the same process
798  * obtain the same handle, hence the name "common".
799  *
800  * See Documentation/dev-tools/kcov.rst for more details.
801  *
802  * Internally, kcov_remote_start() looks up the kcov device associated with the
803  * provided handle, allocates an area for coverage collection, and saves the
804  * pointers to kcov and area into the current task_struct to allow coverage to
805  * be collected via __sanitizer_cov_trace_pc().
806  * In turn, kcov_remote_stop() clears those pointers from the task_struct to stop
807  * collecting coverage and copies all collected coverage into the kcov area.
808  */
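
/*
 * Editor's note: a minimal userspace sketch of KCOV_REMOTE_ENABLE, adapted
 * from Documentation/dev-tools/kcov.rst. It enables collection both for
 * threads spawned on behalf of this process (a common handle with the
 * arbitrary instance id 0x42) and for one USB bus; "bus_num" is a
 * placeholder, and the kcov fd is assumed to have been opened,
 * KCOV_INIT_TRACE'd and mmap()ed as in the first sketch (plus <stdlib.h>
 * for calloc()). Error handling is omitted.
 */
	struct kcov_remote_arg *arg;

	arg = calloc(1, sizeof(*arg) + sizeof(__u64));
	arg->trace_mode = KCOV_TRACE_PC;
	arg->area_size = COVER_SIZE;
	arg->num_handles = 1;
	arg->common_handle = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, 0x42);
	arg->handles[0] = kcov_remote_handle(KCOV_SUBSYSTEM_USB, bus_num);
	ioctl(fd, KCOV_REMOTE_ENABLE, arg);
	/* Sleep or wait for the background activity, then read the buffer
	 * exactly as in the KCOV_ENABLE case, and finish with KCOV_DISABLE. */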
809 
810 static inline bool kcov_mode_enabled(unsigned int mode)
811 {
812         return (mode & ~KCOV_IN_CTXSW) != KCOV_MODE_DISABLED;
813 }
814 
815 static void kcov_remote_softirq_start(struct task_struct *t)
816 {
817         struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
818         unsigned int mode;
819 
820         mode = READ_ONCE(t->kcov_mode);
821         barrier();
822         if (kcov_mode_enabled(mode)) {
823                 data->saved_mode = mode;
824                 data->saved_size = t->kcov_size;
825                 data->saved_area = t->kcov_area;
826                 data->saved_sequence = t->kcov_sequence;
827                 data->saved_kcov = t->kcov;
828                 kcov_stop(t);
829         }
830 }
831 
832 static void kcov_remote_softirq_stop(struct task_struct *t)
833 {
834         struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
835 
836         if (data->saved_kcov) {
837                 kcov_start(t, data->saved_kcov, data->saved_size,
838                                 data->saved_area, data->saved_mode,
839                                 data->saved_sequence);
840                 data->saved_mode = 0;
841                 data->saved_size = 0;
842                 data->saved_area = NULL;
843                 data->saved_sequence = 0;
844                 data->saved_kcov = NULL;
845         }
846 }
847 
848 void kcov_remote_start(u64 handle)
849 {
850         struct task_struct *t = current;
851         struct kcov_remote *remote;
852         struct kcov *kcov;
853         unsigned int mode;
854         void *area;
855         unsigned int size;
856         int sequence;
857         unsigned long flags;
858 
859         if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
860                 return;
861         if (!in_task() && !in_softirq_really())
862                 return;
863 
864         local_lock_irqsave(&kcov_percpu_data.lock, flags);
865 
866         /*
867          * Check that kcov_remote_start() is not called twice in background
868          * threads nor called from user tasks with kcov enabled.
869          */
870         mode = READ_ONCE(t->kcov_mode);
871         if (WARN_ON(in_task() && kcov_mode_enabled(mode))) {
872                 local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
873                 return;
874         }
875         /*
876          * Check that kcov_remote_start() is not called twice in softirqs.
877          * Note that kcov_remote_start() can be called from a softirq that
878          * happened while collecting coverage from a background thread.
879          */
880         if (WARN_ON(in_serving_softirq() && t->kcov_softirq)) {
881                 local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
882                 return;
883         }
884 
885         spin_lock(&kcov_remote_lock);
886         remote = kcov_remote_find(handle);
887         if (!remote) {
888                 spin_unlock(&kcov_remote_lock);
889                 local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
890                 return;
891         }
892         kcov_debug("handle = %llx, context: %s\n", handle,
893                         in_task() ? "task" : "softirq");
894         kcov = remote->kcov;
895         /* Put in kcov_remote_stop(). */
896         kcov_get(kcov);
897         /*
898          * Read kcov fields before unlock to prevent races with
899          * KCOV_DISABLE / kcov_remote_reset().
900          */
901         mode = kcov->mode;
902         sequence = kcov->sequence;
903         if (in_task()) {
904                 size = kcov->remote_size;
905                 area = kcov_remote_area_get(size);
906         } else {
907                 size = CONFIG_KCOV_IRQ_AREA_SIZE;
908                 area = this_cpu_ptr(&kcov_percpu_data)->irq_area;
909         }
910         spin_unlock(&kcov_remote_lock);
911 
912         /* Can only happen when in_task(). */
913         if (!area) {
914                 local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
915                 area = vmalloc(size * sizeof(unsigned long));
916                 if (!area) {
917                         kcov_put(kcov);
918                         return;
919                 }
920                 local_lock_irqsave(&kcov_percpu_data.lock, flags);
921         }
922 
923         /* Reset coverage size. */
924         *(u64 *)area = 0;
925 
926         if (in_serving_softirq()) {
927                 kcov_remote_softirq_start(t);
928                 t->kcov_softirq = 1;
929         }
930         kcov_start(t, kcov, size, area, mode, sequence);
931 
932         local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
933 
934 }
935 EXPORT_SYMBOL(kcov_remote_start);
936 
937 static void kcov_move_area(enum kcov_mode mode, void *dst_area,
938                                 unsigned int dst_area_size, void *src_area)
939 {
940         u64 word_size = sizeof(unsigned long);
941         u64 count_size, entry_size_log;
942         u64 dst_len, src_len;
943         void *dst_entries, *src_entries;
944         u64 dst_occupied, dst_free, bytes_to_move, entries_moved;
945 
946         kcov_debug("%px %u <= %px %lu\n",
947                 dst_area, dst_area_size, src_area, *(unsigned long *)src_area);
948 
949         switch (mode) {
950         case KCOV_MODE_TRACE_PC:
951                 dst_len = READ_ONCE(*(unsigned long *)dst_area);
952                 src_len = *(unsigned long *)src_area;
953                 count_size = sizeof(unsigned long);
954                 entry_size_log = __ilog2_u64(sizeof(unsigned long));
955                 break;
956         case KCOV_MODE_TRACE_CMP:
957                 dst_len = READ_ONCE(*(u64 *)dst_area);
958                 src_len = *(u64 *)src_area;
959                 count_size = sizeof(u64);
960                 BUILD_BUG_ON(!is_power_of_2(KCOV_WORDS_PER_CMP));
961                 entry_size_log = __ilog2_u64(sizeof(u64) * KCOV_WORDS_PER_CMP);
962                 break;
963         default:
964                 WARN_ON(1);
965                 return;
966         }
967 
968         /* As arm can't divide u64 integers, use the log of the entry size. */
969         if (dst_len > ((dst_area_size * word_size - count_size) >>
970                                 entry_size_log))
971                 return;
972         dst_occupied = count_size + (dst_len << entry_size_log);
973         dst_free = dst_area_size * word_size - dst_occupied;
974         bytes_to_move = min(dst_free, src_len << entry_size_log);
975         dst_entries = dst_area + dst_occupied;
976         src_entries = src_area + count_size;
977         memcpy(dst_entries, src_entries, bytes_to_move);
978         entries_moved = bytes_to_move >> entry_size_log;
979 
980         switch (mode) {
981         case KCOV_MODE_TRACE_PC:
982                 WRITE_ONCE(*(unsigned long *)dst_area, dst_len + entries_moved);
983                 break;
984         case KCOV_MODE_TRACE_CMP:
985                 WRITE_ONCE(*(u64 *)dst_area, dst_len + entries_moved);
986                 break;
987         default:
988                 break;
989         }
990 }
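
/*
 * Editor's note: a worked example of the shift arithmetic above, assuming
 * 64-bit longs and KCOV_MODE_TRACE_PC (word_size = count_size = 8 bytes,
 * entry_size_log = 3). With dst_area_size = 4, dst_len = 1 and src_len = 5:
 *
 *   dst_occupied  = 8 + (1 << 3)    = 16 bytes
 *   dst_free      = 4 * 8 - 16      = 16 bytes
 *   bytes_to_move = min(16, 5 << 3) = 16 bytes
 *   entries_moved = 16 >> 3         = 2
 *
 * so only two of the five source PCs are appended and dst_len becomes 3;
 * whatever does not fit is silently dropped, which is why the remote area
 * should be sized generously.
 */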
991 
992 /* See the comment before kcov_remote_start() for usage details. */
993 void kcov_remote_stop(void)
994 {
995         struct task_struct *t = current;
996         struct kcov *kcov;
997         unsigned int mode;
998         void *area;
999         unsigned int size;
1000         int sequence;
1001         unsigned long flags;
1002 
1003         if (!in_task() && !in_softirq_really())
1004                 return;
1005 
1006         local_lock_irqsave(&kcov_percpu_data.lock, flags);
1007 
1008         mode = READ_ONCE(t->kcov_mode);
1009         barrier();
1010         if (!kcov_mode_enabled(mode)) {
1011                 local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
1012                 return;
1013         }
1014         /*
1015          * When in softirq, check if the corresponding kcov_remote_start()
1016          * actually found the remote handle and started collecting coverage.
1017          */
1018         if (in_serving_softirq() && !t->kcov_softirq) {
1019                 local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
1020                 return;
1021         }
1022         /* Make sure that kcov_softirq is only set when in softirq. */
1023         if (WARN_ON(!in_serving_softirq() && t->kcov_softirq)) {
1024                 local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
1025                 return;
1026         }
1027 
1028         kcov = t->kcov;
1029         area = t->kcov_area;
1030         size = t->kcov_size;
1031         sequence = t->kcov_sequence;
1032 
1033         kcov_stop(t);
1034         if (in_serving_softirq()) {
1035                 t->kcov_softirq = 0;
1036                 kcov_remote_softirq_stop(t);
1037         }
1038 
1039         spin_lock(&kcov->lock);
1040         /*
1041          * KCOV_DISABLE could have been called between kcov_remote_start()
1042          * and kcov_remote_stop(), hence the sequence check.
1043          */
1044         if (sequence == kcov->sequence && kcov->remote)
1045                 kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
1046         spin_unlock(&kcov->lock);
1047 
1048         if (in_task()) {
1049                 spin_lock(&kcov_remote_lock);
1050                 kcov_remote_area_put(area, size);
1051                 spin_unlock(&kcov_remote_lock);
1052         }
1053 
1054         local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
1055 
1056         /* Get in kcov_remote_start(). */
1057         kcov_put(kcov);
1058 }
1059 EXPORT_SYMBOL(kcov_remote_stop);
1060 
1061 /* See the comment before kcov_remote_start() for usage details. */
1062 u64 kcov_common_handle(void)
1063 {
1064         if (!in_task())
1065                 return 0;
1066         return current->kcov_handle;
1067 }
1068 EXPORT_SYMBOL(kcov_common_handle);
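
/*
 * Editor's note: a sketch of how a subsystem consumes the common handle,
 * modeled on the vhost usage described in Documentation/dev-tools/kcov.rst.
 * kcov_common_handle(), kcov_remote_start_common() and kcov_remote_stop()
 * are the annotations from <linux/kcov.h>; "dev", "struct my_work" and
 * my_dequeue() are hypothetical names.
 */
#include <linux/kcov.h>
#include <linux/kthread.h>

/* In the ioctl/syscall handler, in the context of the user process: */
	dev->kcov_handle = kcov_common_handle();

/* In the background thread spawned to service that process: */
	while (!kthread_should_stop()) {
		struct my_work *work = my_dequeue(dev);

		kcov_remote_start_common(dev->kcov_handle);
		work->fn(work);	/* coverage here goes to the remote kcov */
		kcov_remote_stop();
	}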
1069 
1070 static int __init kcov_init(void)
1071 {
1072         int cpu;
1073 
1074         for_each_possible_cpu(cpu) {
1075                 void *area = vmalloc_node(CONFIG_KCOV_IRQ_AREA_SIZE *
1076                                 sizeof(unsigned long), cpu_to_node(cpu));
1077                 if (!area)
1078                         return -ENOMEM;
1079                 per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area;
1080         }
1081 
1082         /*
1083          * The kcov debugfs file won't ever get removed and thus
1084          * there is no need to protect it against removal races. The
1085          * use of debugfs_create_file_unsafe() is actually safe here.
1086          */
1087         debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);
1088 
1089         return 0;
1090 }
1091 
1092 device_initcall(kcov_init);
1093 
