// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

#define DEF_PAGETYPE_NAME(_name) [PGTY_##_name - 0xf0] = __stringify(_name)

static const char *page_type_names[] = {
	DEF_PAGETYPE_NAME(slab),
	DEF_PAGETYPE_NAME(hugetlb),
	DEF_PAGETYPE_NAME(offline),
	DEF_PAGETYPE_NAME(guard),
	DEF_PAGETYPE_NAME(table),
	DEF_PAGETYPE_NAME(buddy),
	DEF_PAGETYPE_NAME(unaccepted),
};

static const char *page_type_name(unsigned int page_type)
{
	unsigned i = (page_type >> 24) - 0xf0;

	if (i >= ARRAY_SIZE(page_type_names))
		return "unknown";
	return page_type_names[i];
}
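/*
 * Illustrative decoding (a sketch; the authoritative PGTY_* values live in
 * the pagetype enum in <linux/page-flags.h>): page types occupy the top
 * byte of page->page_type, with the PGTY_* constants starting at 0xf0.
 * Assuming PGTY_buddy == 0xf0, a raw page_type of 0xf0000000 decodes as
 * (0xf0000000 >> 24) - 0xf0 == 0, so page_type_name() returns "buddy";
 * an index past the end of page_type_names[] falls back to "unknown".
 */
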
static void __dump_folio(struct folio *folio, struct page *page,
		unsigned long pfn, unsigned long idx)
{
	struct address_space *mapping = folio_mapping(folio);
	int mapcount = atomic_read(&page->_mapcount);
	char *type = "";

	/* A page type encoded in _mapcount is not a real mapcount. */
	mapcount = page_mapcount_is_type(mapcount) ? 0 : mapcount + 1;
	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			folio_ref_count(folio), mapcount, mapping,
			folio->index + idx, pfn);
	if (folio_test_large(folio)) {
		pr_warn("head: order:%u mapcount:%d entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				folio_order(folio),
				folio_mapcount(folio),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				atomic_read(&folio->_pincount));
	}

#ifdef CONFIG_MEMCG
	if (folio->memcg_data)
		pr_warn("memcg:%lx\n", folio->memcg_data);
#endif
	if (folio_test_ksm(folio))
		type = "ksm ";
	else if (folio_test_anon(folio))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
	if (page_has_type(&folio->page))
		pr_warn("page_type: %x(%s)\n", folio->page.page_type >> 24,
				page_type_name(folio->page.page_type));

	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (folio_test_large(folio))
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
				sizeof(unsigned long), folio,
				2 * sizeof(struct page), false);
}
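/*
 * Putting the format strings above together, a dump of a small anonymous
 * page looks roughly like this -- every value below is invented for
 * illustration, only the layout is real:
 *
 *	page: refcount:1 mapcount:1 mapping:0000000000000000 index:0x7f pfn:0x1a2b3
 *	anon flags: 0x8000000000020014(uptodate|lru|swapbacked|zone=2)
 *	raw: 8000000000020014 dead000000000100 dead000000000122 ...
 */
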
static void __dump_page(const struct page *page)
{
	struct folio *foliop, folio;
	struct page precise;
	unsigned long pfn = page_to_pfn(page);
	unsigned long idx, nr_pages = 1;
	int loops = 5;

again:
	/* Snapshot the page so the dump stays self-consistent if it races. */
	memcpy(&precise, page, sizeof(*page));
	foliop = page_folio(&precise);
	if (foliop == (struct folio *)&precise) {
		idx = 0;
		if (!folio_test_large(foliop))
			goto dump;
		foliop = (struct folio *)page;
	} else {
		idx = folio_page_idx(foliop, page);
	}

	if (idx < MAX_FOLIO_NR_PAGES) {
		memcpy(&folio, foliop, 2 * sizeof(struct page));
		nr_pages = folio_nr_pages(&folio);
		foliop = &folio;
	}

	/*
	 * The snapshot raced with a split or free; retry a few times, then
	 * give up and dump the page as if it were a standalone one.
	 */
	if (idx > nr_pages) {
		if (loops-- > 0)
			goto again;
		pr_warn("page does not match folio\n");
		precise.compound_head &= ~1UL;
		foliop = (struct folio *)&precise;
		idx = 0;
	}

dump:
	__dump_folio(foliop, &precise, pfn, idx);
}

void dump_page(const struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
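/*
 * Typical call site: dump_page() is the reporting half of VM_BUG_ON_PAGE()
 * and VM_BUG_ON_FOLIO() in <linux/mmdebug.h>, which with CONFIG_DEBUG_VM
 * expand to roughly:
 *
 *	if (unlikely(cond)) {
 *		dump_page(page, "VM_BUG_ON_PAGE(" __stringify(cond) ")");
 *		BUG();
 *	}
 *
 * so a failed assertion prints the full page state before panicking.
 */
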
#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
EXPORT_SYMBOL(dump_mm);
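/*
 * As with dump_page() above, these two back VM_BUG_ON_VMA() and
 * VM_BUG_ON_MM() in <linux/mmdebug.h>, which call dump_vma()/dump_mm()
 * before BUG()ing.  A sketch of a typical assertion (the condition is
 * illustrative):
 *
 *	VM_BUG_ON_VMA(vma->vm_end <= vma->vm_start, vma);
 */
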
static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}

void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
}

#endif	/* CONFIG_DEBUG_VM */
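/*
 * Usage note for the "vm_debug" boot parameter parsed by setup_vm_debug()
 * above (option letters are case-insensitive; behaviour follows the parser):
 *
 *	vm_debug	keep every option enabled (the default)
 *	vm_debug=-	disable all options this file controls
 *	vm_debug=P	enable only page struct init-time poisoning
 *
 * Unknown option characters are reported and skipped.
 */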