// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * The freeing of debug objects via the workqueue is limited to a maximum
 * frequency of 10Hz and about 1024 objects per freeing operation, so at
 * most ~10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments are made
 * at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can therefore be off.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int __data_racy			debug_objects_maxchain __read_mostly;
static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
static int __data_racy			debug_objects_fixups __read_mostly;
static int __data_racy			debug_objects_warnings __read_mostly;
static int __data_racy			debug_objects_enabled __read_mostly
					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int __data_racy			debug_objects_pool_size __read_mostly
					= ODEBUG_POOL_SIZE;
static int __data_racy			debug_objects_pool_min_level __read_mostly
					= ODEBUG_POOL_MIN_LEVEL;

static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache	*obj_cache __ro_after_init;

/*
 * Track the number of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
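
/*
 * Example (documentation note, not part of the original file): booting with
 * "debug_objects" on the kernel command line force-enables the tracker and
 * "no_debug_objects" force-disables it, overriding the build-time default
 * from CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT.
 */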

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void fill_pool(void)
{
	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) &&
	       READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}
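
/*
 * Worked note (illustrative, not part of the original file): the GFP mask
 * above contains no reclaim flags, so the page allocator will not sleep;
 * __GFP_HIGH allows dipping into reserves and __GFP_NOWARN suppresses
 * allocation-failure warnings, since a failed refill is tolerable here.
 * (On PREEMPT_RT the refill is additionally gated to preemptible context
 * by debug_objects_fill_pool() further down.) With the default
 * ODEBUG_POOL_MIN_LEVEL of 256 and ODEBUG_BATCH_SIZE of 16, refilling a
 * fully drained pool takes at least 256 / 16 = 16 batched rounds.
 */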

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full. If not, fill the
	 * pool list from the global free list. As it is likely that a
	 * workload may be gearing up to use more and more objects, don't
	 * free any of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * The pool list is already full and there are still objs on the free
	 * list. Move the remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}
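
/*
 * Worked example (illustrative, not part of the original file): with
 * HZ=250, ODEBUG_FREE_WORK_DELAY is DIV_ROUND_UP(250, 10) = 25 jiffies,
 * i.e. ~100ms, so free_obj_work() runs at most ten times per second and
 * each run hands back roughly ODEBUG_FREE_WORK_MAX (1024) objects - the
 * ~10k objects per second cap described at the top of this file.
 */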

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	struct debug_percpu_free *percpu_pool;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;

	/* Remote access is safe as the CPU is dead already */
	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);
	obj_pool_used -= percpu_pool->obj_free;
	debug_objects_freed += percpu_pool->obj_free;
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	percpu_pool->obj_free = 0;

	return 0;
}
#endif

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
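
/*
 * Example (illustrative, not part of the original file): since
 * ODEBUG_CHUNK_SHIFT is PAGE_SHIFT, two addresses inside the same 4K page,
 * e.g. 0x1000 and 0x1ff8, yield the same chunk number (1) and thus hash to
 * the same bucket. Checking for objects inside a freed memory range
 * therefore only needs to walk the buckets of the pages overlapping that
 * range, as __debug_check_no_obj_freed() does below.
 */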

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = 0;
	return NULL;
}
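
/*
 * Summary (documentation note, not part of the original file): callers of
 * lookup_object_or_alloc() see three outcomes - the tracked (or freshly
 * allocated) object, ERR_PTR(-ENOENT) when alloc_ifstatic is true but the
 * object is neither tracked nor static, or NULL when the pool is
 * exhausted, in which case the caller must run debug_objects_oom() after
 * dropping the bucket lock.
 */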

static void debug_objects_fill_pool(void)
{
	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
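
/*
 * Usage sketch (illustrative, not part of the original file; "my_thing"
 * and "my_descr" are hypothetical): an on-stack object must be announced
 * with debug_object_init_on_stack() and released with debug_object_free()
 * before its stack frame disappears:
 *
 *	void do_on_stack_thing(void)
 *	{
 *		struct my_thing thing;
 *
 *		debug_object_init_on_stack(&thing, &my_descr);
 *		debug_object_activate(&thing, &my_descr);
 *		...
 *		debug_object_deactivate(&thing, &my_descr);
 *		debug_object_free(&thing, &my_descr);
 *	}
 *
 * debug_object_is_on_stack() above warns when the on-stack/off-stack
 * annotation does not match the actual location of the object.
 */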

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 on success, -EINVAL if a check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);
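
/*
 * Usage sketch (illustrative, not part of the original file; "my_descr"
 * is hypothetical): a caller that must not start an object which failed
 * the checks and could not be fixed up can bail out on -EINVAL:
 *
 *	if (debug_object_activate(obj, &my_descr))
 *		return;		// refused by the object state machine
 */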

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when an object should be initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}
	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
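
/*
 * Usage sketch (illustrative, not part of the original file; the state
 * values and "my_descr" are hypothetical): users can layer an
 * object-specific state machine on top of ODEBUG_STATE_ACTIVE via
 * ->astate, e.g.
 *
 *	debug_object_active_state(obj, &my_descr, STATE_READY, STATE_QUEUED);
 *
 * moves astate from STATE_READY to STATE_QUEUED and warns if the object
 * is not active or its astate is not STATE_READY. Note that
 * debug_object_deactivate() above refuses to deactivate an object whose
 * astate is non-zero and warns instead.
 */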

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif
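
/*
 * Example (documentation note, not part of the original file): with
 * debugfs mounted at /sys/kernel/debug, the counters above can be read
 * from /sys/kernel/debug/debug_objects/stats, one "name :value" pair per
 * line. pool_free and pool_used are adjusted by the percpu free counts
 * here, as the comment near their definitions explains.
 */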

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated += i;

	/*
	 * debug_objects_mem_init() is called early, when only one CPU is up
	 * and interrupts are disabled, so it is safe to replace the active
	 * object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
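
/*
 * Boot order note (documentation comment, not part of the original file):
 * debug_objects_early_init() above runs before the slab allocator is
 * available and seeds obj_pool with the static __initdata objects;
 * debug_objects_mem_init() below then creates obj_cache and swaps every
 * static object, including those already linked into hash buckets, for a
 * dynamically allocated copy via debug_objects_replace_static_objects().
 */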

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools
	 *
	 * Initialization is not strictly necessary, but is done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
		return;
	} else
		debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}
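
/*
 * Worked example (illustrative, not part of the original file): on a
 * machine with 64 possible CPUs, extras = 64 * ODEBUG_BATCH_SIZE = 1024,
 * raising debug_objects_pool_size from 1024 to 2048 and
 * debug_objects_pool_min_level from 256 to 1280.
 *
 * Usage sketch (illustrative; all "my_*" names are hypothetical): a
 * subsystem hooks into this infrastructure by providing a descriptor and
 * bracketing its object's lifetime with the debug_object_*() calls:
 *
 *	static bool my_fixup_free(void *addr, enum debug_obj_state state)
 *	{
 *		if (state == ODEBUG_STATE_ACTIVE) {
 *			my_obj_stop(addr);
 *			debug_object_free(addr, &my_descr);
 *			return true;	// damage repaired
 *		}
 *		return false;
 *	}
 *
 *	static const struct debug_obj_descr my_descr = {
 *		.name		= "my_obj",
 *		.fixup_free	= my_fixup_free,
 *	};
 *
 *	my_obj_init():    debug_object_init(obj, &my_descr);
 *	my_obj_start():   debug_object_activate(obj, &my_descr);
 *	my_obj_stop():    debug_object_deactivate(obj, &my_descr);
 *	my_obj_release(): debug_object_destroy(obj, &my_descr);
 *	                  debug_object_free(obj, &my_descr);
 */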