TOMOYO Linux Cross Reference
Linux/mm/kmemleak.c


// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list as well as
 *   del_state modifications and accesses to the object trees
 *   (object_tree_root, object_phys_tree_root, object_percpu_tree_root). The
 *   object_list is the main list holding the metadata (struct
 *   kmemleak_object) for the allocated memory blocks. The object trees are
 *   red black trees used to look up metadata based on a pointer to the
 *   corresponding memory block. The kmemleak_object structures are added to
 *   the object_list and the object tree root in the create_object() function
 *   called from the kmemleak_alloc{,_phys,_percpu}() callback and removed in
 *   delete_object() called from the kmemleak_free{,_phys,_percpu}() callback.
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
 *   that some members of this structure may be protected by other means
 *   (atomic or kmemleak_lock). This lock is also held when scanning the
 *   corresponding memory block to avoid the kernel freeing it via the
 *   kmemleak_free() callback. This is less heavyweight than holding a global
 *   lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer.
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
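
/*
 * Illustrative sketch (not part of the original file): the canonical
 * reference-counted lookup pattern described above, as implemented by
 * __find_and_get_object() further down, looks roughly like:
 *
 *      rcu_read_lock();
 *      raw_spin_lock_irqsave(&kmemleak_lock, flags);
 *      object = __lookup_object(ptr, alias, objflags);
 *      raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 *      if (object && !get_object(object))      // use_count already 0
 *              object = NULL;
 *      rcu_read_unlock();
 *      ...
 *      put_object(object);     // may schedule the freeing via RCU
 */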

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE               16      /* stack trace length */
#define MSECS_MIN_AGE           5000    /* minimum object age for reporting */
#define SECS_FIRST_SCAN         60      /* delay before the first scan */
#define SECS_SCAN_WAIT          600     /* subsequent auto scanning delay */
#define MAX_SCAN_SIZE           4096    /* maximum size of a scanned block */

#define BYTES_PER_POINTER       sizeof(void *)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
        struct hlist_node node;
        unsigned long start;
        size_t size;
};

#define KMEMLEAK_GREY   0
#define KMEMLEAK_BLACK  -1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
        raw_spinlock_t lock;
        unsigned int flags;             /* object status flags */
        struct list_head object_list;
        struct list_head gray_list;
        struct rb_node rb_node;
        struct rcu_head rcu;            /* object_list lockless traversal */
        /* object usage count; object freed when use_count == 0 */
        atomic_t use_count;
        unsigned int del_state;         /* deletion state */
        unsigned long pointer;
        size_t size;
        /* pass surplus references to this pointer */
        unsigned long excess_ref;
        /* minimum number of pointers found before the object is considered a leak */
        int min_count;
        /* the total number of pointers found pointing to this object */
        int count;
        /* checksum for detecting modified objects */
        u32 checksum;
        depot_stack_handle_t trace_handle;
        /* memory ranges to be scanned inside an object (empty for all) */
        struct hlist_head area_list;
        unsigned long jiffies;          /* creation timestamp */
        pid_t pid;                      /* pid of the current task */
        char comm[TASK_COMM_LEN];       /* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED        (1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED         (1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN          (1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN        (1 << 3)
/* flag set for object allocated with physical address */
#define OBJECT_PHYS             (1 << 4)
/* flag set for per-CPU pointers */
#define OBJECT_PERCPU           (1 << 5)

/* set when __remove_object() is called */
#define DELSTATE_REMOVED        (1 << 0)
/* set to temporarily prevent deletion from object_list */
#define DELSTATE_NO_DELETE      (1 << 1)

#define HEX_PREFIX              "    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE            16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE          1
/* include ASCII after the hex output */
#define HEX_ASCII               1
/* max number of lines to be printed */
#define HEX_MAX_LINES           2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PHYS flag) boundaries */
static struct rb_root object_phys_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PERCPU flag) boundaries */
static struct rb_root object_percpu_tree_root = RB_ROOT;
/* protects the access to object_list and the object trees above */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_late_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static unsigned long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* set if there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)     do {            \
        pr_warn(x);                             \
        dump_stack();                           \
        kmemleak_warning = 1;                   \
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing will no longer be available.
 */
#define kmemleak_stop(x...)     do {    \
        kmemleak_warn(x);               \
        kmemleak_disable();             \
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)       do {    \
        if (seq)                                        \
                seq_printf(seq, fmt, ##__VA_ARGS__);    \
        else                                            \
                pr_warn(fmt, ##__VA_ARGS__);            \
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
                                 int rowsize, int groupsize, const void *buf,
                                 size_t len, bool ascii)
{
        if (seq)
                seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
                             buf, len, ascii);
        else
                print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
                               rowsize, groupsize, buf, len, ascii);
}

/*
 * Print the object's hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
                            struct kmemleak_object *object)
{
        const u8 *ptr = (const u8 *)object->pointer;
        size_t len;

        if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU)))
                return;

        /* limit the number of lines to HEX_MAX_LINES */
        len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

        warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
        kasan_disable_current();
        warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
                             HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
        kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, either marked as a false positive (min_count == 0)
 *              or with sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *              (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
        return object->count != KMEMLEAK_BLACK &&
                object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
        return object->min_count != KMEMLEAK_BLACK &&
                object->count >= object->min_count;
}
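
/*
 * Illustrative example (not in the original source): a kmalloc'ed block is
 * registered with min_count == 1, so if a scan finds no pointers to it
 * (count == 0 < min_count) it is white and may be reported as a leak.
 * kmemleak_not_leak() paints it gray (min_count = 0, hence always
 * count >= min_count), while kmemleak_ignore() paints it black
 * (min_count = -1), excluding it from both scanning and reporting.
 */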

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
        return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
                time_before_eq(object->jiffies + jiffies_min_age,
                               jiffies_last_scan);
}

/*
 * Print the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
                               struct kmemleak_object *object)
{
        int i;
        unsigned long *entries;
        unsigned int nr_entries;

        nr_entries = stack_depot_fetch(object->trace_handle, &entries);
        warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
                          object->pointer, object->size);
        warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
                           object->comm, object->pid, object->jiffies);
        hex_dump_object(seq, object);
        warn_or_seq_printf(seq, "  backtrace (crc %x):\n", object->checksum);

        for (i = 0; i < nr_entries; i++) {
                void *ptr = (void *)entries[i];
                warn_or_seq_printf(seq, "    [<%pK>] %pS\n", ptr, ptr);
        }
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
        pr_notice("Object 0x%08lx (size %zu):\n",
                        object->pointer, object->size);
        pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
                        object->comm, object->pid, object->jiffies);
        pr_notice("  min_count = %d\n", object->min_count);
        pr_notice("  count = %d\n", object->count);
        pr_notice("  flags = 0x%x\n", object->flags);
        pr_notice("  checksum = %u\n", object->checksum);
        pr_notice("  backtrace:\n");
        if (object->trace_handle)
                stack_depot_print(object->trace_handle);
}

static struct rb_root *object_tree(unsigned long objflags)
{
        if (objflags & OBJECT_PHYS)
                return &object_phys_tree_root;
        if (objflags & OBJECT_PERCPU)
                return &object_percpu_tree_root;
        return &object_tree_root;
}

/*
 * Look up a memory block's metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
                                               unsigned int objflags)
{
        struct rb_node *rb = object_tree(objflags)->rb_node;
        unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);

        while (rb) {
                struct kmemleak_object *object;
                unsigned long untagged_objp;

                object = rb_entry(rb, struct kmemleak_object, rb_node);
                untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

                if (untagged_ptr < untagged_objp)
                        rb = object->rb_node.rb_left;
                else if (untagged_objp + object->size <= untagged_ptr)
                        rb = object->rb_node.rb_right;
                else if (untagged_objp == untagged_ptr || alias)
                        return object;
                else {
                        kmemleak_warn("Found object by alias at 0x%08lx\n",
                                      ptr);
                        dump_object_info(object);
                        break;
                }
        }
        return NULL;
}

/* Look up a kmemleak object which was allocated with a virtual address. */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
        return __lookup_object(ptr, alias, 0);
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
        return atomic_inc_not_zero(&object->use_count);
}

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object;

        /* try the slab allocator first */
        if (object_cache) {
                object = kmem_cache_alloc_noprof(object_cache,
                                                 gfp_nested_mask(gfp));
                if (object)
                        return object;
        }

        /* slab allocation failed, try the memory pool */
        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        object = list_first_entry_or_null(&mem_pool_free_list,
                                          typeof(*object), object_list);
        if (object)
                list_del(&object->object_list);
        else if (mem_pool_free_count)
                object = &mem_pool[--mem_pool_free_count];
        else
                pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

        return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
        unsigned long flags;

        if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
                kmem_cache_free(object_cache, object);
                return;
        }

        /* add the object to the memory pool free list */
        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        list_add(&object->object_list, &mem_pool_free_list);
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
        struct hlist_node *tmp;
        struct kmemleak_scan_area *area;
        struct kmemleak_object *object =
                container_of(rcu, struct kmemleak_object, rcu);

        /*
         * Once use_count is 0 (guaranteed by put_object), there is no other
         * code accessing this object, hence no need for locking.
         */
        hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
                hlist_del(&area->node);
                kmem_cache_free(scan_area_cache, area);
        }
        mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
        if (!atomic_dec_and_test(&object->use_count))
                return;

        /* should only get here after delete_object was called */
        WARN_ON(object->flags & OBJECT_ALLOCATED);

        /*
         * It may be too early for the RCU callbacks, however, there is no
         * concurrent object_list traversal when !object_cache and all objects
         * came from the memory pool. Free the object directly.
         */
        if (object_cache)
                call_rcu(&object->rcu, free_object_rcu);
        else
                free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
                                                     unsigned int objflags)
{
        unsigned long flags;
        struct kmemleak_object *object;

        rcu_read_lock();
        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        object = __lookup_object(ptr, alias, objflags);
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

        /* check whether the object is still available */
        if (object && !get_object(object))
                object = NULL;
        rcu_read_unlock();

        return object;
}

/* Look up and get an object which was allocated with a virtual address. */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
        return __find_and_get_object(ptr, alias, 0);
}

/*
 * Remove an object from its object tree and object_list. Must be called with
 * the kmemleak_lock held _if_ kmemleak is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
        rb_erase(&object->rb_node, object_tree(object->flags));
        if (!(object->del_state & DELSTATE_NO_DELETE))
                list_del_rcu(&object->object_list);
        object->del_state |= DELSTATE_REMOVED;
}

static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
                                                        int alias,
                                                        unsigned int objflags)
{
        struct kmemleak_object *object;

        object = __lookup_object(ptr, alias, objflags);
        if (object)
                __remove_object(object);

        return object;
}

/*
 * Look up an object in the object search tree and remove it from both object
 * tree root and object_list. The returned object's use_count should be at
 * least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
                                                      unsigned int objflags)
{
        unsigned long flags;
        struct kmemleak_object *object;

        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        object = __find_and_remove_object(ptr, alias, objflags);
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

        return object;
}

static noinline depot_stack_handle_t set_track_prepare(void)
{
        depot_stack_handle_t trace_handle;
        unsigned long entries[MAX_TRACE];
        unsigned int nr_entries;

        /*
         * Use object_cache to determine whether kmemleak_init() has
         * been invoked. stack_depot_early_init() is called before
         * kmemleak_init() in mm_core_init().
         */
        if (!object_cache)
                return 0;
        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
        trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);

        return trace_handle;
}

static struct kmemleak_object *__alloc_object(gfp_t gfp)
{
        struct kmemleak_object *object;

        object = mem_pool_alloc(gfp);
        if (!object) {
                pr_warn("Cannot allocate a kmemleak_object structure\n");
                kmemleak_disable();
                return NULL;
        }

        INIT_LIST_HEAD(&object->object_list);
        INIT_LIST_HEAD(&object->gray_list);
        INIT_HLIST_HEAD(&object->area_list);
        raw_spin_lock_init(&object->lock);
        atomic_set(&object->use_count, 1);
        object->excess_ref = 0;
        object->count = 0;                      /* white color initially */
        object->checksum = 0;
        object->del_state = 0;

        /* task information */
        if (in_hardirq()) {
                object->pid = 0;
                strscpy(object->comm, "hardirq");
        } else if (in_serving_softirq()) {
                object->pid = 0;
                strscpy(object->comm, "softirq");
        } else {
                object->pid = current->pid;
                /*
                 * There is a small chance of a race with set_task_comm(),
                 * however using get_task_comm() here may cause locking
                 * dependency issues with current->alloc_lock. In the worst
                 * case, the command line is not correct.
                 */
                strscpy(object->comm, current->comm);
        }

        /* kernel backtrace */
        object->trace_handle = set_track_prepare();

        return object;
}

static int __link_object(struct kmemleak_object *object, unsigned long ptr,
                         size_t size, int min_count, unsigned int objflags)
{
        struct kmemleak_object *parent;
        struct rb_node **link, *rb_parent;
        unsigned long untagged_ptr;
        unsigned long untagged_objp;

        object->flags = OBJECT_ALLOCATED | objflags;
        object->pointer = ptr;
        object->size = kfence_ksize((void *)ptr) ?: size;
        object->min_count = min_count;
        object->jiffies = jiffies;

        untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
        /*
         * Only update min_addr and max_addr with objects storing a virtual
         * address.
         */
        if (!(objflags & (OBJECT_PHYS | OBJECT_PERCPU))) {
                min_addr = min(min_addr, untagged_ptr);
                max_addr = max(max_addr, untagged_ptr + size);
        }
        link = &object_tree(objflags)->rb_node;
        rb_parent = NULL;
        while (*link) {
                rb_parent = *link;
                parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
                untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
                if (untagged_ptr + size <= untagged_objp)
                        link = &parent->rb_node.rb_left;
                else if (untagged_objp + parent->size <= untagged_ptr)
                        link = &parent->rb_node.rb_right;
                else {
                        kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
                                      ptr);
                        /*
                         * No need for parent->lock here since "parent" cannot
                         * be freed while the kmemleak_lock is held.
                         */
                        dump_object_info(parent);
                        return -EEXIST;
                }
        }
        rb_link_node(&object->rb_node, rb_parent, link);
        rb_insert_color(&object->rb_node, object_tree(objflags));
        list_add_tail_rcu(&object->object_list, &object_list);

        return 0;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object tree.
 */
static void __create_object(unsigned long ptr, size_t size,
                                int min_count, gfp_t gfp, unsigned int objflags)
{
        struct kmemleak_object *object;
        unsigned long flags;
        int ret;

        object = __alloc_object(gfp);
        if (!object)
                return;

        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        ret = __link_object(object, ptr, size, min_count, objflags);
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
        if (ret)
                mem_pool_free(object);
}

/* Create a kmemleak object for a block allocated with a virtual address. */
static void create_object(unsigned long ptr, size_t size,
                          int min_count, gfp_t gfp)
{
        __create_object(ptr, size, min_count, gfp, 0);
}

/* Create a kmemleak object for a block allocated with a physical address. */
static void create_object_phys(unsigned long ptr, size_t size,
                               int min_count, gfp_t gfp)
{
        __create_object(ptr, size, min_count, gfp, OBJECT_PHYS);
}

/* Create a kmemleak object corresponding to a per-CPU allocation. */
static void create_object_percpu(unsigned long ptr, size_t size,
                                 int min_count, gfp_t gfp)
{
        __create_object(ptr, size, min_count, gfp, OBJECT_PERCPU);
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
        unsigned long flags;

        WARN_ON(!(object->flags & OBJECT_ALLOCATED));
        WARN_ON(atomic_read(&object->use_count) < 1);

        /*
         * Locking here also ensures that the corresponding memory block
         * cannot be freed when it is being scanned.
         */
        raw_spin_lock_irqsave(&object->lock, flags);
        object->flags &= ~OBJECT_ALLOCATED;
        raw_spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr, unsigned int objflags)
{
        struct kmemleak_object *object;

        object = find_and_remove_object(ptr, 0, objflags);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Freeing unknown object at 0x%08lx\n",
                              ptr);
#endif
                return;
        }
        __delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size,
                               unsigned int objflags)
{
        struct kmemleak_object *object, *object_l, *object_r;
        unsigned long start, end, flags;

        object_l = __alloc_object(GFP_KERNEL);
        if (!object_l)
                return;

        object_r = __alloc_object(GFP_KERNEL);
        if (!object_r)
                goto out;

        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        object = __find_and_remove_object(ptr, 1, objflags);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
                              ptr, size);
#endif
                goto unlock;
        }

        /*
         * Create one or two objects that may result from the memory block
         * split. Note that partial freeing is only done by free_bootmem() and
         * this happens before kmemleak_init() is called.
         */
        start = object->pointer;
        end = object->pointer + object->size;
        if ((ptr > start) &&
            !__link_object(object_l, start, ptr - start,
                           object->min_count, objflags))
                object_l = NULL;
        if ((ptr + size < end) &&
            !__link_object(object_r, ptr + size, end - ptr - size,
                           object->min_count, objflags))
                object_r = NULL;

unlock:
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
        if (object)
                __delete_object(object);

out:
        if (object_l)
                mem_pool_free(object_l);
        if (object_r)
                mem_pool_free(object_r);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
        object->min_count = color;
        if (color == KMEMLEAK_BLACK)
                object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&object->lock, flags);
        __paint_it(object, color);
        raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
{
        struct kmemleak_object *object;

        object = __find_and_get_object(ptr, 0, objflags);
        if (!object) {
                kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
                              ptr,
                              (color == KMEMLEAK_GREY) ? "Grey" :
                              (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
                return;
        }
        paint_it(object, color);
        put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
        paint_ptr(ptr, KMEMLEAK_GREY, 0);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr, unsigned int objflags)
{
        paint_ptr(ptr, KMEMLEAK_BLACK, objflags);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object;
        struct kmemleak_scan_area *area = NULL;
        unsigned long untagged_ptr;
        unsigned long untagged_objp;

        object = find_and_get_object(ptr, 1);
        if (!object) {
                kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
                              ptr);
                return;
        }

        untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
        untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

        if (scan_area_cache)
                area = kmem_cache_alloc_noprof(scan_area_cache,
                                               gfp_nested_mask(gfp));

        raw_spin_lock_irqsave(&object->lock, flags);
        if (!area) {
                pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
                /* mark the object for full scan to avoid false positives */
                object->flags |= OBJECT_FULL_SCAN;
                goto out_unlock;
        }
        if (size == SIZE_MAX) {
                size = untagged_objp + object->size - untagged_ptr;
        } else if (untagged_ptr + size > untagged_objp + object->size) {
                kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
                dump_object_info(object);
                kmem_cache_free(scan_area_cache, area);
                goto out_unlock;
        }

        INIT_HLIST_NODE(&area->node);
        area->start = ptr;
        area->size = size;

        hlist_add_head(&area->node, &object->area_list);
out_unlock:
        raw_spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
                              ptr);
                return;
        }

        raw_spin_lock_irqsave(&object->lock, flags);
        object->excess_ref = excess_ref;
        raw_spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references
 * to it are still searched.
 */
static void object_no_scan(unsigned long ptr)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
                return;
        }

        raw_spin_lock_irqsave(&object->lock, flags);
        object->flags |= OBJECT_NO_SCAN;
        raw_spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:        pointer to beginning of the object
 * @size:       size of the object
 * @min_count:  minimum number of references to this object. If during memory
 *              scanning a number of references less than @min_count is found,
 *              the object is reported as a memory leak. If @min_count is 0,
 *              the object is never reported as a leak. If @min_count is -1,
 *              the object is ignored (not scanned and not reported as a leak)
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
                          gfp_t gfp)
{
        pr_debug("%s(0x%px, %zu, %d)\n", __func__, ptr, size, min_count);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
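
/*
 * Illustrative sketch (not part of the original file): a hypothetical driver
 * that hands out blocks from its own arena could inform kmemleak about them,
 * in the spirit of Documentation/dev-tools/kmemleak.rst. The my_arena_*()
 * and arena_*() names below are assumptions for illustration only:
 *
 *      void *my_arena_alloc(size_t size)
 *      {
 *              void *p = arena_carve_out(size);        // hypothetical helper
 *
 *              if (p)  // min_count 1: expect one reference, else report
 *                      kmemleak_alloc(p, size, 1, GFP_KERNEL);
 *              return p;
 *      }
 *
 *      void my_arena_free(void *p)
 *      {
 *              kmemleak_free(p);       // unregister before actually freeing
 *              arena_give_back(p);     // hypothetical helper
 *      }
 */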

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:        __percpu pointer to beginning of the object
 * @size:       size of the object
 * @gfp:        flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
                                 gfp_t gfp)
{
        pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);

        /*
         * Percpu allocations are only scanned and not reported as leaks
         * (min_count is set to 0).
         */
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                create_object_percpu((unsigned long)ptr, size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:       pointer to vm_struct
 * @size:       size of the object
 * @gfp:        __vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
        pr_debug("%s(0x%px, %zu)\n", __func__, area, size);

        /*
         * A min_count = 2 is needed because vm_struct contains a reference to
         * the virtual address of the vmalloc'ed block.
         */
        if (kmemleak_enabled) {
                create_object((unsigned long)area->addr, size, 2, gfp);
                object_set_excess_ref((unsigned long)area,
                                      (unsigned long)area->addr);
        }
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:        pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
        pr_debug("%s(0x%px)\n", __func__, ptr);

        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                delete_object_full((unsigned long)ptr, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:        pointer to the beginning or inside the object. This also
 *              represents the start of the range to be freed
 * @size:       size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
        pr_debug("%s(0x%px)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                delete_object_part((unsigned long)ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:        __percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
        pr_debug("%s(0x%px)\n", __func__, ptr);

        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                delete_object_full((unsigned long)ptr, OBJECT_PERCPU);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:        pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
        struct kmemleak_object *object;
        depot_stack_handle_t trace_handle;
        unsigned long flags;

        pr_debug("%s(0x%px)\n", __func__, ptr);

        if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
                return;

        object = find_and_get_object((unsigned long)ptr, 1);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Updating stack trace for unknown object at %p\n",
                              ptr);
#endif
                return;
        }

        trace_handle = set_track_prepare();
        raw_spin_lock_irqsave(&object->lock, flags);
        object->trace_handle = trace_handle;
        raw_spin_unlock_irqrestore(&object->lock, flags);

        put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:        pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
        pr_debug("%s(0x%px)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);
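
/*
 * Illustrative sketch (not part of the original file): a typical false
 * positive arises when the only reference to an allocation is stored
 * somewhere kmemleak does not scan, e.g. handed to a device register.
 * The buffer and register names below are assumptions for illustration:
 *
 *      buf = kmalloc(BUF_SIZE, GFP_KERNEL);
 *      writel(lower_32_bits(virt_to_phys(buf)), base + DMA_ADDR_REG);
 *      kmemleak_not_leak(buf); // only the device "holds" a reference
 */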

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:        pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
        pr_debug("%s(0x%px)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_black_object((unsigned long)ptr, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:        pointer to beginning or inside the object. This also
 *              represents the start of the scan area
 * @size:       size of the scan area
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
        pr_debug("%s(0x%px)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
                add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);
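
/*
 * Illustrative sketch (not part of the original file): restricting scanning
 * to the pointer-bearing part of a mostly opaque structure. struct my_obj
 * and its fields are assumptions for illustration only:
 *
 *      struct my_obj {
 *              u8 raw[4096];           // payload; may hold stale values
 *              struct list_head link;  // the only real pointers in here
 *      };
 *
 *      obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *      kmemleak_scan_area(&obj->link, sizeof(obj->link), GFP_KERNEL);
 */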

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:        pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
        pr_debug("%s(0x%px)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *                       address argument
 * @phys:       physical address of the object
 * @size:       size of the object
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
{
        pr_debug("%s(0x%px, %zu)\n", __func__, &phys, size);

        if (kmemleak_enabled)
                /*
                 * Create object with OBJECT_PHYS flag and
                 * assume min_count 0.
                 */
                create_object_phys((unsigned long)phys, size, 0, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *                           physical address argument
 * @phys:       physical address of the beginning or inside an object. This
 *              also represents the start of the range to be freed
 * @size:       size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
        pr_debug("%s(0x%px)\n", __func__, &phys);

        if (kmemleak_enabled)
                delete_object_part((unsigned long)phys, size, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *                        address argument
 * @phys:       physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
        pr_debug("%s(0x%px)\n", __func__, &phys);

        if (kmemleak_enabled)
                make_black_object((unsigned long)phys, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
        u32 old_csum = object->checksum;

        if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU)))
                return false;

        kasan_disable_current();
        kcsan_disable_current();
        object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
        kasan_enable_current();
        kcsan_enable_current();

        return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
        if (!color_white(object)) {
                /* non-orphan, ignored or new */
                return;
        }

        /*
         * Increase the object's reference count (number of pointers to the
         * memory block). If this count reaches the required minimum, the
         * object's color will become gray and it will be added to the
         * gray_list.
         */
        object->count++;
        if (color_gray(object)) {
                /* put_object() called when removing from gray_list */
                WARN_ON(!get_object(object));
                list_add_tail(&object->gray_list, &gray_list);
        }
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition occurred.
 */
static int scan_should_stop(void)
{
        if (!kmemleak_enabled)
                return 1;

        /*
         * This function may be called from either process or kthread context,
         * hence the need to check for both stop conditions.
         */
        if (current->mm)
                return signal_pending(current);
        else
                return kthread_should_stop();

        return 0;
}
1363 
1364 /*
1365  * Scan a memory block (exclusive range) for valid pointers and add those
1366  * found to the gray list.
1367  */
1368 static void scan_block(void *_start, void *_end,
1369                        struct kmemleak_object *scanned)
1370 {
1371         unsigned long *ptr;
1372         unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1373         unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1374         unsigned long flags;
1375         unsigned long untagged_ptr;
1376 
1377         raw_spin_lock_irqsave(&kmemleak_lock, flags);
1378         for (ptr = start; ptr < end; ptr++) {
1379                 struct kmemleak_object *object;
1380                 unsigned long pointer;
1381                 unsigned long excess_ref;
1382 
1383                 if (scan_should_stop())
1384                         break;
1385 
1386                 kasan_disable_current();
1387                 pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
1388                 kasan_enable_current();
1389 
1390                 untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
1391                 if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
1392                         continue;
1393 
1394                 /*
1395                  * No need for get_object() here since we hold kmemleak_lock.
1396                  * object->use_count cannot be dropped to 0 while the object
1397                  * is still present in object_tree_root and object_list
1398                  * (with updates protected by kmemleak_lock).
1399                  */
1400                 object = lookup_object(pointer, 1);
1401                 if (!object)
1402                         continue;
1403                 if (object == scanned)
1404                         /* self referenced, ignore */
1405                         continue;
1406 
1407                 /*
1408                  * Avoid the lockdep recursive warning on object->lock being
1409                  * previously acquired in scan_object(). These locks are
1410                  * enclosed by scan_mutex.
1411                  */
1412                 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1413                 /* only pass surplus references (object already gray) */
1414                 if (color_gray(object)) {
1415                         excess_ref = object->excess_ref;
1416                         /* no need for update_refs() if object already gray */
1417                 } else {
1418                         excess_ref = 0;
1419                         update_refs(object);
1420                 }
1421                 raw_spin_unlock(&object->lock);
1422 
1423                 if (excess_ref) {
1424                         object = lookup_object(excess_ref, 0);
1425                         if (!object)
1426                                 continue;
1427                         if (object == scanned)
1428                                 /* circular reference, ignore */
1429                                 continue;
1430                         raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1431                         update_refs(object);
1432                         raw_spin_unlock(&object->lock);
1433                 }
1434         }
1435         raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
1436 }
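
/*
 * Example (illustrative, not part of kmemleak.c): the conservative scanning
 * loop above in miniature -- visit every aligned pointer-sized word in a
 * block and report values that fall inside the tracked address range.
 * example_mark() and the min/max bounds are made up.
 */
#include <linux/kernel.h>

static void example_scan_words(void *start, void *end,
			       unsigned long min_addr, unsigned long max_addr,
			       void (*example_mark)(unsigned long candidate))
{
	unsigned long *p = PTR_ALIGN(start, sizeof(unsigned long));

	for (; (void *)(p + 1) <= end; p++) {
		unsigned long val = *p;

		if (val >= min_addr && val < max_addr)
			example_mark(val);	/* may be a reference */
	}
}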
1437 
1438 /*
1439  * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1440  */
1441 #ifdef CONFIG_SMP
1442 static void scan_large_block(void *start, void *end)
1443 {
1444         void *next;
1445 
1446         while (start < end) {
1447                 next = min(start + MAX_SCAN_SIZE, end);
1448                 scan_block(start, next, NULL);
1449                 start = next;
1450                 cond_resched();
1451         }
1452 }
1453 #endif
1454 
1455 /*
1456  * Scan a memory block corresponding to a kmemleak_object. The caller must
1457  * ensure that object->use_count >= 1.
1458  */
1459 static void scan_object(struct kmemleak_object *object)
1460 {
1461         struct kmemleak_scan_area *area;
1462         unsigned long flags;
1463 
1464         /*
1465          * Once the object->lock is acquired, the corresponding memory block
1466          * cannot be freed (the same lock is acquired in delete_object).
1467          */
1468         raw_spin_lock_irqsave(&object->lock, flags);
1469         if (object->flags & OBJECT_NO_SCAN)
1470                 goto out;
1471         if (!(object->flags & OBJECT_ALLOCATED))
1472                 /* already freed object */
1473                 goto out;
1474 
1475         if (object->flags & OBJECT_PERCPU) {
1476                 unsigned int cpu;
1477 
1478                 for_each_possible_cpu(cpu) {
1479                         void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu);
1480                         void *end = start + object->size;
1481 
1482                         scan_block(start, end, object);
1483 
1484                         raw_spin_unlock_irqrestore(&object->lock, flags);
1485                         cond_resched();
1486                         raw_spin_lock_irqsave(&object->lock, flags);
1487                         if (!(object->flags & OBJECT_ALLOCATED))
1488                                 break;
1489                 }
1490         } else if (hlist_empty(&object->area_list) ||
1491             object->flags & OBJECT_FULL_SCAN) {
1492                 void *start = object->flags & OBJECT_PHYS ?
1493                                 __va((phys_addr_t)object->pointer) :
1494                                 (void *)object->pointer;
1495                 void *end = start + object->size;
1496                 void *next;
1497 
1498                 do {
1499                         next = min(start + MAX_SCAN_SIZE, end);
1500                         scan_block(start, next, object);
1501 
1502                         start = next;
1503                         if (start >= end)
1504                                 break;
1505 
1506                         raw_spin_unlock_irqrestore(&object->lock, flags);
1507                         cond_resched();
1508                         raw_spin_lock_irqsave(&object->lock, flags);
1509                 } while (object->flags & OBJECT_ALLOCATED);
1510         } else {
1511                 hlist_for_each_entry(area, &object->area_list, node)
1512                         scan_block((void *)area->start,
1513                                    (void *)(area->start + area->size),
1514                                    object);
1515         }
1516 out:
1517         raw_spin_unlock_irqrestore(&object->lock, flags);
1518 }
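
/*
 * Example (illustrative, not part of kmemleak.c): walking each CPU's copy of
 * a percpu allocation, as the OBJECT_PERCPU branch above does. The visitor
 * callback is made up.
 */
#include <linux/percpu.h>

static void example_walk_percpu(void __percpu *base, size_t size,
				void (*example_visit)(void *start, void *end))
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);

		example_visit(start, start + size);
	}
}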
1519 
1520 /*
1521  * Scan the objects already referenced (gray objects). Scanning turns more
1522  * objects gray; with no memory leaks, every object is eventually scanned.
1523  */
1524 static void scan_gray_list(void)
1525 {
1526         struct kmemleak_object *object, *tmp;
1527 
1528         /*
1529          * The list traversal is safe for both tail additions and removals
1530          * from inside the loop. The kmemleak objects cannot be freed from
1531          * outside the loop because their use_count was incremented.
1532          */
1533         object = list_entry(gray_list.next, typeof(*object), gray_list);
1534         while (&object->gray_list != &gray_list) {
1535                 cond_resched();
1536 
1537                 /* may add new objects to the list */
1538                 if (!scan_should_stop())
1539                         scan_object(object);
1540 
1541                 tmp = list_entry(object->gray_list.next, typeof(*object),
1542                                  gray_list);
1543 
1544                 /* remove the object from the list and release it */
1545                 list_del(&object->gray_list);
1546                 put_object(object);
1547 
1548                 object = tmp;
1549         }
1550         WARN_ON(!list_empty(&gray_list));
1551 }
1552 
1553 /*
1554  * Conditionally call cond_resched() in an object iteration loop while making
1555  * sure that the given object won't go away while the RCU read lock is dropped,
1556  * by taking a reference via get_object() if necessary.
1557  */
1558 static void kmemleak_cond_resched(struct kmemleak_object *object)
1559 {
1560         if (!get_object(object))
1561                 return; /* Try next object */
1562 
1563         raw_spin_lock_irq(&kmemleak_lock);
1564         if (object->del_state & DELSTATE_REMOVED)
1565                 goto unlock_put;        /* Object removed */
1566         object->del_state |= DELSTATE_NO_DELETE;
1567         raw_spin_unlock_irq(&kmemleak_lock);
1568 
1569         rcu_read_unlock();
1570         cond_resched();
1571         rcu_read_lock();
1572 
1573         raw_spin_lock_irq(&kmemleak_lock);
1574         if (object->del_state & DELSTATE_REMOVED)
1575                 list_del_rcu(&object->object_list);
1576         object->del_state &= ~DELSTATE_NO_DELETE;
1577 unlock_put:
1578         raw_spin_unlock_irq(&kmemleak_lock);
1579         put_object(object);
1580 }
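
/*
 * Example (illustrative, not part of kmemleak.c): the general shape of the
 * pattern above -- pin the current element with a reference count so the RCU
 * read lock can be dropped around a voluntary reschedule. struct example_node
 * and example_get/example_put are made up; the del_state bookkeeping that the
 * real function needs for list integrity is deliberately omitted.
 */
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct example_node {
	struct rcu_head rcu;
	refcount_t ref;
};

static bool example_get(struct example_node *n)
{
	return refcount_inc_not_zero(&n->ref);
}

static void example_put(struct example_node *n)
{
	if (refcount_dec_and_test(&n->ref))
		kfree_rcu(n, rcu);
}

static void example_cond_resched(struct example_node *n)
{
	if (!example_get(n))
		return;			/* node already going away */

	rcu_read_unlock();
	cond_resched();			/* may sleep */
	rcu_read_lock();

	example_put(n);
}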
1581 
1582 /*
1583  * Scan data sections and all the referenced memory blocks allocated via the
1584  * kernel's standard allocators. This function must be called with the
1585  * scan_mutex held.
1586  */
1587 static void kmemleak_scan(void)
1588 {
1589         struct kmemleak_object *object;
1590         struct zone *zone;
1591         int __maybe_unused i;
1592         int new_leaks = 0;
1593 
1594         jiffies_last_scan = jiffies;
1595 
1596         /* prepare the kmemleak_objects */
1597         rcu_read_lock();
1598         list_for_each_entry_rcu(object, &object_list, object_list) {
1599                 raw_spin_lock_irq(&object->lock);
1600 #ifdef DEBUG
1601                 /*
1602                  * With a few exceptions there should be a maximum of
1603                  * 1 reference to any object at this point.
1604                  */
1605                 if (atomic_read(&object->use_count) > 1) {
1606                         pr_debug("object->use_count = %d\n",
1607                                  atomic_read(&object->use_count));
1608                         dump_object_info(object);
1609                 }
1610 #endif
1611 
1612                 /* ignore objects outside lowmem (paint them black) */
1613                 if ((object->flags & OBJECT_PHYS) &&
1614                    !(object->flags & OBJECT_NO_SCAN)) {
1615                         unsigned long phys = object->pointer;
1616 
1617                         if (PHYS_PFN(phys) < min_low_pfn ||
1618                             PHYS_PFN(phys + object->size) >= max_low_pfn)
1619                                 __paint_it(object, KMEMLEAK_BLACK);
1620                 }
1621 
1622                 /* reset the reference count (whiten the object) */
1623                 object->count = 0;
1624                 if (color_gray(object) && get_object(object))
1625                         list_add_tail(&object->gray_list, &gray_list);
1626 
1627                 raw_spin_unlock_irq(&object->lock);
1628 
1629                 if (need_resched())
1630                         kmemleak_cond_resched(object);
1631         }
1632         rcu_read_unlock();
1633 
1634 #ifdef CONFIG_SMP
1635         /* per-cpu sections scanning */
1636         for_each_possible_cpu(i)
1637                 scan_large_block(__per_cpu_start + per_cpu_offset(i),
1638                                  __per_cpu_end + per_cpu_offset(i));
1639 #endif
1640 
1641         /*
1642          * Struct page scanning for each populated zone.
1643          */
1644         get_online_mems();
1645         for_each_populated_zone(zone) {
1646                 unsigned long start_pfn = zone->zone_start_pfn;
1647                 unsigned long end_pfn = zone_end_pfn(zone);
1648                 unsigned long pfn;
1649 
1650                 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1651                         struct page *page = pfn_to_online_page(pfn);
1652 
1653                         if (!(pfn & 63))
1654                                 cond_resched();
1655 
1656                         if (!page)
1657                                 continue;
1658 
1659                         /* only scan pages belonging to this zone */
1660                         if (page_zone(page) != zone)
1661                                 continue;
1662                         /* only scan if page is in use */
1663                         if (page_count(page) == 0)
1664                                 continue;
1665                         scan_block(page, page + 1, NULL);
1666                 }
1667         }
1668         put_online_mems();
1669 
1670         /*
1671          * Scanning the task stacks (may introduce false negatives).
1672          */
1673         if (kmemleak_stack_scan) {
1674                 struct task_struct *p, *g;
1675 
1676                 rcu_read_lock();
1677                 for_each_process_thread(g, p) {
1678                         void *stack = try_get_task_stack(p);
1679                         if (stack) {
1680                                 scan_block(stack, stack + THREAD_SIZE, NULL);
1681                                 put_task_stack(p);
1682                         }
1683                 }
1684                 rcu_read_unlock();
1685         }
1686 
1687         /*
1688          * Scan the objects already referenced from the sections scanned
1689          * above.
1690          */
1691         scan_gray_list();
1692 
1693         /*
1694          * Check for new or unreferenced objects modified since the previous
1695          * scan and color them gray until the next scan.
1696          */
1697         rcu_read_lock();
1698         list_for_each_entry_rcu(object, &object_list, object_list) {
1699                 if (need_resched())
1700                         kmemleak_cond_resched(object);
1701 
1702                 /*
1703                  * This is racy but we can save the overhead of lock/unlock
1704                  * calls. The missed objects, if any, should be caught in
1705                  * the next scan.
1706                  */
1707                 if (!color_white(object))
1708                         continue;
1709                 raw_spin_lock_irq(&object->lock);
1710                 if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1711                     && update_checksum(object) && get_object(object)) {
1712                         /* color it gray temporarily */
1713                         object->count = object->min_count;
1714                         list_add_tail(&object->gray_list, &gray_list);
1715                 }
1716                 raw_spin_unlock_irq(&object->lock);
1717         }
1718         rcu_read_unlock();
1719 
1720         /*
1721          * Re-scan the gray list for modified unreferenced objects.
1722          */
1723         scan_gray_list();
1724 
1725         /*
1726          * If scanning was stopped do not report any new unreferenced objects.
1727          */
1728         if (scan_should_stop())
1729                 return;
1730 
1731         /*
1732          * Scanning result reporting.
1733          */
1734         rcu_read_lock();
1735         list_for_each_entry_rcu(object, &object_list, object_list) {
1736                 if (need_resched())
1737                         kmemleak_cond_resched(object);
1738 
1739                 /*
1740                  * This is racy but we can save the overhead of lock/unlock
1741                  * calls. The missed objects, if any, should be caught in
1742                  * the next scan.
1743                  */
1744                 if (!color_white(object))
1745                         continue;
1746                 raw_spin_lock_irq(&object->lock);
1747                 if (unreferenced_object(object) &&
1748                     !(object->flags & OBJECT_REPORTED)) {
1749                         object->flags |= OBJECT_REPORTED;
1750 
1751                         if (kmemleak_verbose)
1752                                 print_unreferenced(NULL, object);
1753 
1754                         new_leaks++;
1755                 }
1756                 raw_spin_unlock_irq(&object->lock);
1757         }
1758         rcu_read_unlock();
1759 
1760         if (new_leaks) {
1761                 kmemleak_found_leaks = true;
1762 
1763                 pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1764                         new_leaks);
1765         }
1766 
1767 }
1768 
1769 /*
1770  * Thread function performing automatic memory scanning. Unreferenced objects
1771  * found at the end of a memory scan are reported, but only the first time.
1772  */
1773 static int kmemleak_scan_thread(void *arg)
1774 {
1775         static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);
1776 
1777         pr_info("Automatic memory scanning thread started\n");
1778         set_user_nice(current, 10);
1779 
1780         /*
1781          * Wait before the first scan to allow the system to fully initialize.
1782          */
1783         if (first_run) {
1784                 signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
1785                 first_run = 0;
1786                 while (timeout && !kthread_should_stop())
1787                         timeout = schedule_timeout_interruptible(timeout);
1788         }
1789 
1790         while (!kthread_should_stop()) {
1791                 signed long timeout = READ_ONCE(jiffies_scan_wait);
1792 
1793                 mutex_lock(&scan_mutex);
1794                 kmemleak_scan();
1795                 mutex_unlock(&scan_mutex);
1796 
1797                 /* wait before the next scan */
1798                 while (timeout && !kthread_should_stop())
1799                         timeout = schedule_timeout_interruptible(timeout);
1800         }
1801 
1802         pr_info("Automatic memory scanning thread ended\n");
1803 
1804         return 0;
1805 }
1806 
1807 /*
1808  * Start the automatic memory scanning thread. This function must be called
1809  * with the scan_mutex held.
1810  */
1811 static void start_scan_thread(void)
1812 {
1813         if (scan_thread)
1814                 return;
1815         scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1816         if (IS_ERR(scan_thread)) {
1817                 pr_warn("Failed to create the scan thread\n");
1818                 scan_thread = NULL;
1819         }
1820 }
1821 
1822 /*
1823  * Stop the automatic memory scanning thread.
1824  */
1825 static void stop_scan_thread(void)
1826 {
1827         if (scan_thread) {
1828                 kthread_stop(scan_thread);
1829                 scan_thread = NULL;
1830         }
1831 }
1832 
1833 /*
1834  * Iterate over the object_list and return the first valid object at or after
1835  * the required position with its use_count incremented. Reading this file
1836  * does not itself trigger a memory scan.
1837  */
1838 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1839 {
1840         struct kmemleak_object *object;
1841         loff_t n = *pos;
1842         int err;
1843 
1844         err = mutex_lock_interruptible(&scan_mutex);
1845         if (err < 0)
1846                 return ERR_PTR(err);
1847 
1848         rcu_read_lock();
1849         list_for_each_entry_rcu(object, &object_list, object_list) {
1850                 if (n-- > 0)
1851                         continue;
1852                 if (get_object(object))
1853                         goto out;
1854         }
1855         object = NULL;
1856 out:
1857         return object;
1858 }
1859 
1860 /*
1861  * Return the next object in the object_list. The function decrements the
1862  * use_count of the previous object and increases that of the next one.
1863  */
1864 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1865 {
1866         struct kmemleak_object *prev_obj = v;
1867         struct kmemleak_object *next_obj = NULL;
1868         struct kmemleak_object *obj = prev_obj;
1869 
1870         ++(*pos);
1871 
1872         list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1873                 if (get_object(obj)) {
1874                         next_obj = obj;
1875                         break;
1876                 }
1877         }
1878 
1879         put_object(prev_obj);
1880         return next_obj;
1881 }
1882 
1883 /*
1884  * Decrement the use_count of the last object returned, if any.
1885  */
1886 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1887 {
1888         if (!IS_ERR(v)) {
1889                 /*
1890                  * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1891                  * waiting was interrupted, so only release it if !IS_ERR.
1892                  */
1893                 rcu_read_unlock();
1894                 mutex_unlock(&scan_mutex);
1895                 if (v)
1896                         put_object(v);
1897         }
1898 }
1899 
1900 /*
1901  * Print the information for an unreferenced object to the seq file.
1902  */
1903 static int kmemleak_seq_show(struct seq_file *seq, void *v)
1904 {
1905         struct kmemleak_object *object = v;
1906         unsigned long flags;
1907 
1908         raw_spin_lock_irqsave(&object->lock, flags);
1909         if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1910                 print_unreferenced(seq, object);
1911         raw_spin_unlock_irqrestore(&object->lock, flags);
1912         return 0;
1913 }
1914 
1915 static const struct seq_operations kmemleak_seq_ops = {
1916         .start = kmemleak_seq_start,
1917         .next  = kmemleak_seq_next,
1918         .stop  = kmemleak_seq_stop,
1919         .show  = kmemleak_seq_show,
1920 };
1921 
1922 static int kmemleak_open(struct inode *inode, struct file *file)
1923 {
1924         return seq_open(file, &kmemleak_seq_ops);
1925 }
1926 
1927 static int dump_str_object_info(const char *str)
1928 {
1929         unsigned long flags;
1930         struct kmemleak_object *object;
1931         unsigned long addr;
1932 
1933         if (kstrtoul(str, 0, &addr))
1934                 return -EINVAL;
1935         object = find_and_get_object(addr, 0);
1936         if (!object) {
1937                 pr_info("Unknown object at 0x%08lx\n", addr);
1938                 return -EINVAL;
1939         }
1940 
1941         raw_spin_lock_irqsave(&object->lock, flags);
1942         dump_object_info(object);
1943         raw_spin_unlock_irqrestore(&object->lock, flags);
1944 
1945         put_object(object);
1946         return 0;
1947 }
1948 
1949 /*
1950  * We use grey instead of black so that the same objects are scanned again in
1951  * future scans. If they were painted black and no longer scanned, references
1952  * they hold to newly allocated objects would be missed and we'd end up with
1953  * false positives.
1954  */
1955 static void kmemleak_clear(void)
1956 {
1957         struct kmemleak_object *object;
1958 
1959         rcu_read_lock();
1960         list_for_each_entry_rcu(object, &object_list, object_list) {
1961                 raw_spin_lock_irq(&object->lock);
1962                 if ((object->flags & OBJECT_REPORTED) &&
1963                     unreferenced_object(object))
1964                         __paint_it(object, KMEMLEAK_GREY);
1965                 raw_spin_unlock_irq(&object->lock);
1966         }
1967         rcu_read_unlock();
1968 
1969         kmemleak_found_leaks = false;
1970 }
1971 
1972 static void __kmemleak_do_cleanup(void);
1973 
1974 /*
1975  * File write operation to configure kmemleak at run-time. The following
1976  * commands can be written to the /sys/kernel/debug/kmemleak file:
1977  *   off        - disable kmemleak (irreversible)
1978  *   stack=on   - enable the task stack scanning
1979  *   stack=off  - disable the task stack scanning
1980  *   scan=on    - start the automatic memory scanning thread
1981  *   scan=off   - stop the automatic memory scanning thread
1982  *   scan=...   - set the automatic memory scanning period in seconds (0 to
1983  *                disable it)
1984  *   scan       - trigger a memory scan
1985  *   clear      - mark all currently reported unreferenced kmemleak objects
1986  *                as grey so they are no longer printed, or free all kmemleak
1987  *                objects if kmemleak has been disabled.
1988  *   dump=...   - dump information about the object found at the given address
1989  */
1990 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1991                               size_t size, loff_t *ppos)
1992 {
1993         char buf[64];
1994         int buf_size;
1995         int ret;
1996 
1997         buf_size = min(size, (sizeof(buf) - 1));
1998         if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1999                 return -EFAULT;
2000         buf[buf_size] = 0;
2001 
2002         ret = mutex_lock_interruptible(&scan_mutex);
2003         if (ret < 0)
2004                 return ret;
2005 
2006         if (strncmp(buf, "clear", 5) == 0) {
2007                 if (kmemleak_enabled)
2008                         kmemleak_clear();
2009                 else
2010                         __kmemleak_do_cleanup();
2011                 goto out;
2012         }
2013 
2014         if (!kmemleak_enabled) {
2015                 ret = -EPERM;
2016                 goto out;
2017         }
2018 
2019         if (strncmp(buf, "off", 3) == 0)
2020                 kmemleak_disable();
2021         else if (strncmp(buf, "stack=on", 8) == 0)
2022                 kmemleak_stack_scan = 1;
2023         else if (strncmp(buf, "stack=off", 9) == 0)
2024                 kmemleak_stack_scan = 0;
2025         else if (strncmp(buf, "scan=on", 7) == 0)
2026                 start_scan_thread();
2027         else if (strncmp(buf, "scan=off", 8) == 0)
2028                 stop_scan_thread();
2029         else if (strncmp(buf, "scan=", 5) == 0) {
2030                 unsigned secs;
2031                 unsigned long msecs;
2032 
2033                 ret = kstrtouint(buf + 5, 0, &secs);
2034                 if (ret < 0)
2035                         goto out;
2036 
2037                 msecs = secs * MSEC_PER_SEC;
2038                 if (msecs > UINT_MAX)
2039                         msecs = UINT_MAX;
2040 
2041                 stop_scan_thread();
2042                 if (msecs) {
2043                         WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
2044                         start_scan_thread();
2045                 }
2046         } else if (strncmp(buf, "scan", 4) == 0)
2047                 kmemleak_scan();
2048         else if (strncmp(buf, "dump=", 5) == 0)
2049                 ret = dump_str_object_info(buf + 5);
2050         else
2051                 ret = -EINVAL;
2052 
2053 out:
2054         mutex_unlock(&scan_mutex);
2055         if (ret < 0)
2056                 return ret;
2057 
2058         /* ignore the rest of the buffer, only one command at a time */
2059         *ppos += size;
2060         return size;
2061 }
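
/*
 * Example usage from the shell, exercising the commands documented above:
 *
 *   # echo scan > /sys/kernel/debug/kmemleak
 *   # cat /sys/kernel/debug/kmemleak
 *   # echo clear > /sys/kernel/debug/kmemleak
 *   # echo scan=600 > /sys/kernel/debug/kmemleak
 */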
2062 
2063 static const struct file_operations kmemleak_fops = {
2064         .owner          = THIS_MODULE,
2065         .open           = kmemleak_open,
2066         .read           = seq_read,
2067         .write          = kmemleak_write,
2068         .llseek         = seq_lseek,
2069         .release        = seq_release,
2070 };
2071 
2072 static void __kmemleak_do_cleanup(void)
2073 {
2074         struct kmemleak_object *object, *tmp;
2075 
2076         /*
2077          * Kmemleak has already been disabled, no need for RCU list traversal
2078          * or kmemleak_lock held.
2079          */
2080         list_for_each_entry_safe(object, tmp, &object_list, object_list) {
2081                 __remove_object(object);
2082                 __delete_object(object);
2083         }
2084 }
2085 
2086 /*
2087  * Stop the memory scanning thread and free the kmemleak internal objects if
2088  * no memory leaks were found (otherwise, kmemleak may still hold useful
2089  * information on the reported memory leaks).
2090  */
2091 static void kmemleak_do_cleanup(struct work_struct *work)
2092 {
2093         stop_scan_thread();
2094 
2095         mutex_lock(&scan_mutex);
2096         /*
2097          * Once the kmemleak_scan thread has stopped, it is safe to stop
2098          * tracking object freeing. Ordering of the scan thread stopping and
2099          * the memory accesses below is guaranteed by the kthread_stop()
2100          * function.
2101          */
2102         kmemleak_free_enabled = 0;
2103         mutex_unlock(&scan_mutex);
2104 
2105         if (!kmemleak_found_leaks)
2106                 __kmemleak_do_cleanup();
2107         else
2108                 pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
2109 }
2110 
2111 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
2112 
2113 /*
2114  * Disable kmemleak. No memory allocation/freeing will be traced once this
2115  * function is called. Disabling kmemleak is an irreversible operation.
2116  */
2117 static void kmemleak_disable(void)
2118 {
2119         /* atomically check whether it was already invoked */
2120         if (cmpxchg(&kmemleak_error, 0, 1))
2121                 return;
2122 
2123         /* stop any memory operation tracing */
2124         kmemleak_enabled = 0;
2125 
2126         /* check whether it is too early for a kernel thread */
2127         if (kmemleak_late_initialized)
2128                 schedule_work(&cleanup_work);
2129         else
2130                 kmemleak_free_enabled = 0;
2131 
2132         pr_info("Kernel memory leak detector disabled\n");
2133 }
2134 
2135 /*
2136  * Allow boot-time kmemleak disabling (enabled by default).
2137  */
2138 static int __init kmemleak_boot_config(char *str)
2139 {
2140         if (!str)
2141                 return -EINVAL;
2142         if (strcmp(str, "off") == 0)
2143                 kmemleak_disable();
2144         else if (strcmp(str, "on") == 0) {
2145                 kmemleak_skip_disable = 1;
2146                 stack_depot_request_early_init();
2147         }
2148         else
2149                 return -EINVAL;
2150         return 0;
2151 }
2152 early_param("kmemleak", kmemleak_boot_config);
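
/*
 * Example (boot command line): passing "kmemleak=off" disables the detector
 * before it initializes; "kmemleak=on" overrides
 * CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF and requests early stack depot
 * initialization, as handled above.
 */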
2153 
2154 /*
2155  * Kmemleak initialization.
2156  */
2157 void __init kmemleak_init(void)
2158 {
2159 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
2160         if (!kmemleak_skip_disable) {
2161                 kmemleak_disable();
2162                 return;
2163         }
2164 #endif
2165 
2166         if (kmemleak_error)
2167                 return;
2168 
2169         jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
2170         jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
2171 
2172         object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
2173         scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
2174 
2175         /* register the data/bss sections */
2176         create_object((unsigned long)_sdata, _edata - _sdata,
2177                       KMEMLEAK_GREY, GFP_ATOMIC);
2178         create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
2179                       KMEMLEAK_GREY, GFP_ATOMIC);
2180         /* only register .data..ro_after_init if not within .data */
2181         if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
2182                 create_object((unsigned long)__start_ro_after_init,
2183                               __end_ro_after_init - __start_ro_after_init,
2184                               KMEMLEAK_GREY, GFP_ATOMIC);
2185 }
2186 
2187 /*
2188  * Late initialization function.
2189  */
2190 static int __init kmemleak_late_init(void)
2191 {
2192         kmemleak_late_initialized = 1;
2193 
2194         debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
2195 
2196         if (kmemleak_error) {
2197                 /*
2198                  * Some error occurred and kmemleak was disabled. There is a
2199                  * small chance that kmemleak_disable() was called immediately
2200                  * after setting kmemleak_late_initialized and we may end up with
2201                  * two clean-up threads, but they are serialized by scan_mutex.
2202                  */
2203                 schedule_work(&cleanup_work);
2204                 return -ENOMEM;
2205         }
2206 
2207         if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
2208                 mutex_lock(&scan_mutex);
2209                 start_scan_thread();
2210                 mutex_unlock(&scan_mutex);
2211         }
2212 
2213         pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
2214                 mem_pool_free_count);
2215 
2216         return 0;
2217 }
2218 late_initcall(kmemleak_late_init);
2219 
