// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/sched/clock.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

struct page_owner {
	unsigned short order;
	short last_migrate_reason;
	gfp_t gfp_mask;
	depot_stack_handle_t handle;
	depot_stack_handle_t free_handle;
	u64 ts_nsec;
	u64 free_ts_nsec;
	char comm[TASK_COMM_LEN];
	pid_t pid;
	pid_t tgid;
	pid_t free_pid;
	pid_t free_tgid;
};

struct stack {
	struct stack_record *stack_record;
	struct stack *next;
};
static struct stack dummy_stack;
static struct stack failure_stack;
static struct stack *stack_list;
static DEFINE_SPINLOCK(stack_list_lock);

static bool page_owner_enabled __initdata;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

static inline void set_current_in_page_owner(void)
{
	/*
	 * Avoid recursion.
	 *
	 * We might need to allocate more memory from page_owner code, so make
	 * sure to signal it in order to avoid recursion.
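	 *
	 * Both stack_depot_save() (via save_stack()) and the kmalloc() in
	 * add_stack_record_to_list() can re-enter the page allocator; with
	 * current->in_page_owner set, save_stack() bails out early and
	 * returns dummy_handle instead of recursing.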
	 */
	current->in_page_owner = 1;
}

static inline void unset_current_in_page_owner(void)
{
	current->in_page_owner = 0;
}

static int __init early_page_owner_param(char *buf)
{
	int ret = kstrtobool(buf, &page_owner_enabled);

	if (page_owner_enabled)
		stack_depot_request_early_init();

	return ret;
}
early_param("page_owner", early_page_owner_param);

static __init bool need_page_owner(void)
{
	return page_owner_enabled;
}

static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
	unsigned long entries[4];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
	dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
	failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
	early_handle = create_dummy_stack();
}

static __init void init_page_owner(void)
{
	if (!page_owner_enabled)
		return;

	register_dummy_stack();
	register_failure_stack();
	register_early_stack();
	init_early_allocated_pages();
	/* Initialize dummy and failure stacks and link them to stack_list */
	dummy_stack.stack_record = __stack_depot_get_stack_record(dummy_handle);
	failure_stack.stack_record = __stack_depot_get_stack_record(failure_handle);
	if (dummy_stack.stack_record)
		refcount_set(&dummy_stack.stack_record->count, 1);
	if (failure_stack.stack_record)
		refcount_set(&failure_stack.stack_record->count, 1);
	dummy_stack.next = &failure_stack;
	stack_list = &dummy_stack;
	static_branch_enable(&page_owner_inited);
}

struct page_ext_operations page_owner_ops = {
	.size = sizeof(struct page_owner),
	.need = need_page_owner,
	.init = init_page_owner,
	.need_shared_flags = true,
};

static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
	return page_ext_data(page_ext, &page_owner_ops);
}

static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	depot_stack_handle_t handle;
	unsigned int nr_entries;

	if (current->in_page_owner)
		return dummy_handle;

	set_current_in_page_owner();
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
	handle = stack_depot_save(entries, nr_entries, flags);
	if (!handle)
		handle = failure_handle;
	unset_current_in_page_owner();

	return handle;
}

static void add_stack_record_to_list(struct stack_record *stack_record,
				     gfp_t gfp_mask)
{
	unsigned long flags;
	struct stack *stack;

	set_current_in_page_owner();
	stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask));
	if (!stack) {
		unset_current_in_page_owner();
		return;
	}
	unset_current_in_page_owner();

	stack->stack_record = stack_record;
	stack->next = NULL;

	spin_lock_irqsave(&stack_list_lock, flags);
	stack->next = stack_list;
	/*
	 * This pairs with smp_load_acquire() from function
	 * stack_start(). This guarantees that stack_start()
	 * will see an updated stack_list before starting to
	 * traverse the list.
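	 *
	 * Note that the list is only ever prepended to (under
	 * stack_list_lock) and entries are never removed, which is what
	 * makes the lockless traversal in stack_start()/stack_next() safe.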
	 */
	smp_store_release(&stack_list, stack);
	spin_unlock_irqrestore(&stack_list_lock, flags);
}

static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask,
				   int nr_base_pages)
{
	struct stack_record *stack_record = __stack_depot_get_stack_record(handle);

	if (!stack_record)
		return;

	/*
	 * New stack_record's that do not use STACK_DEPOT_FLAG_GET start
	 * with REFCOUNT_SATURATED to catch spurious increments of their
	 * refcount.
	 * Since we do not use STACK_DEPOT_FLAG_GET API, let us
	 * set a refcount of 1 ourselves.
	 */
	if (refcount_read(&stack_record->count) == REFCOUNT_SATURATED) {
		int old = REFCOUNT_SATURATED;

		if (atomic_try_cmpxchg_relaxed(&stack_record->count.refs, &old, 1))
			/* Add the new stack_record to our list */
			add_stack_record_to_list(stack_record, gfp_mask);
	}
	refcount_add(nr_base_pages, &stack_record->count);
}

static void dec_stack_record_count(depot_stack_handle_t handle,
				   int nr_base_pages)
{
	struct stack_record *stack_record = __stack_depot_get_stack_record(handle);

	if (!stack_record)
		return;

	if (refcount_sub_and_test(nr_base_pages, &stack_record->count))
		pr_warn("%s: refcount went to 0 for %u handle\n", __func__,
			handle);
}

static inline void __update_page_owner_handle(struct page_ext *page_ext,
					      depot_stack_handle_t handle,
					      unsigned short order,
					      gfp_t gfp_mask,
					      short last_migrate_reason, u64 ts_nsec,
					      pid_t pid, pid_t tgid, char *comm)
{
	int i;
	struct page_owner *page_owner;

	for (i = 0; i < (1 << order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->handle = handle;
		page_owner->order = order;
		page_owner->gfp_mask = gfp_mask;
		page_owner->last_migrate_reason = last_migrate_reason;
		page_owner->pid = pid;
		page_owner->tgid = tgid;
		page_owner->ts_nsec = ts_nsec;
		strscpy(page_owner->comm, comm,
			sizeof(page_owner->comm));
		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
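		/*
		 * PAGE_EXT_OWNER says "owner info has been recorded at
		 * least once"; PAGE_EXT_OWNER_ALLOCATED additionally marks
		 * the page as currently allocated and is cleared again on
		 * free by __update_page_owner_free_handle().
		 */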
		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
		page_ext = page_ext_next(page_ext);
	}
}

static inline void __update_page_owner_free_handle(struct page_ext *page_ext,
						   depot_stack_handle_t handle,
						   unsigned short order,
						   pid_t pid, pid_t tgid,
						   u64 free_ts_nsec)
{
	int i;
	struct page_owner *page_owner;

	for (i = 0; i < (1 << order); i++) {
		page_owner = get_page_owner(page_ext);
		/* Only __reset_page_owner() wants to clear the bit */
		if (handle) {
			__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
			page_owner->free_handle = handle;
		}
		page_owner->free_ts_nsec = free_ts_nsec;
		page_owner->free_pid = current->pid;
		page_owner->free_tgid = current->tgid;
		page_ext = page_ext_next(page_ext);
	}
}

void __reset_page_owner(struct page *page, unsigned short order)
{
	struct page_ext *page_ext;
	depot_stack_handle_t handle;
	depot_stack_handle_t alloc_handle;
	struct page_owner *page_owner;
	u64 free_ts_nsec = local_clock();

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	alloc_handle = page_owner->handle;

	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
	__update_page_owner_free_handle(page_ext, handle, order, current->pid,
					current->tgid, free_ts_nsec);
	page_ext_put(page_ext);

	if (alloc_handle != early_handle)
		/*
		 * early_handle is being set as a handle for all those
		 * early allocated pages. See init_pages_in_zone().
		 * Since their refcount is not being incremented because
		 * the machinery is not ready yet, we cannot decrement
		 * their refcount either.
		 */
		dec_stack_record_count(alloc_handle, 1 << order);
}

noinline void __set_page_owner(struct page *page, unsigned short order,
			       gfp_t gfp_mask)
{
	struct page_ext *page_ext;
	u64 ts_nsec = local_clock();
	depot_stack_handle_t handle;

	handle = save_stack(gfp_mask);

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;
	__update_page_owner_handle(page_ext, handle, order, gfp_mask, -1,
				   ts_nsec, current->pid, current->tgid,
				   current->comm);
	page_ext_put(page_ext);
	inc_stack_record_count(handle, gfp_mask, 1 << order);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->last_migrate_reason = reason;
	page_ext_put(page_ext);
}

void __split_page_owner(struct page *page, int old_order, int new_order)
{
	int i;
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	for (i = 0; i < (1 << old_order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->order = new_order;
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

void __folio_copy_owner(struct folio *newfolio, struct folio *old)
{
	int i;
	struct page_ext *old_ext;
	struct page_ext *new_ext;
	struct page_owner *old_page_owner;
	struct page_owner *new_page_owner;
	depot_stack_handle_t migrate_handle;

	old_ext = page_ext_get(&old->page);
	if (unlikely(!old_ext))
		return;

	new_ext = page_ext_get(&newfolio->page);
	if (unlikely(!new_ext)) {
		page_ext_put(old_ext);
		return;
	}

	old_page_owner = get_page_owner(old_ext);
	new_page_owner = get_page_owner(new_ext);
	migrate_handle = new_page_owner->handle;
	__update_page_owner_handle(new_ext, old_page_owner->handle,
				   old_page_owner->order, old_page_owner->gfp_mask,
				   old_page_owner->last_migrate_reason,
				   old_page_owner->ts_nsec, old_page_owner->pid,
				   old_page_owner->tgid, old_page_owner->comm);
	/*
	 * Do not proactively clear PAGE_EXT_OWNER{_ALLOCATED} bits as the folio
	 * will be freed after migration. Keep them until then as they may be
	 * useful.
	 */
	__update_page_owner_free_handle(new_ext, 0, old_page_owner->order,
					old_page_owner->free_pid,
					old_page_owner->free_tgid,
					old_page_owner->free_ts_nsec);
	/*
	 * We linked the original stack to the new folio; we need to do the
	 * same for the new one and the old folio, otherwise there will be an
	 * imbalance when subtracting those pages from the stack.
	 */
	for (i = 0; i < (1 << new_page_owner->order); i++) {
		old_page_owner->handle = migrate_handle;
		old_ext = page_ext_next(old_ext);
		old_page_owner = get_page_owner(old_ext);
	}

	page_ext_put(new_ext);
	page_ext_put(old_ext);
}

void pagetypeinfo_showmixedcount_print(struct seq_file *m,
				       pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	unsigned long pfn, block_end_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones.
	 * This does not matter as the mixed block count will still be correct.
	 */
	for (; pfn < end_pfn; ) {
		page = pfn_to_online_page(pfn);
		if (!page) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = pageblock_end_pfn(pfn);
		block_end_pfn = min(block_end_pfn, end_pfn);

		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			/* The pageblock is online, no need to recheck. */
			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				unsigned long freepage_order;

				freepage_order = buddy_order_unsafe(page);
				if (freepage_order <= MAX_PAGE_ORDER)
					pfn += (1UL << freepage_order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
				goto ext_put_continue;

			page_owner = get_page_owner(page_ext);
			page_mt = gfp_migratetype(page_owner->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				page_ext_put(page_ext);
				break;
			}
			pfn += (1UL << page_owner->order) - 1;
ext_put_continue:
			page_ext_put(page_ext);
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}

/*
 * Look up memcg information and print it out
 */
static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret,
					 struct page *page)
{
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
	struct mem_cgroup *memcg;
	bool online;
	char name[80];

	rcu_read_lock();
	memcg_data = READ_ONCE(page->memcg_data);
	if (!memcg_data)
		goto out_unlock;
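
	/*
	 * MEMCG_DATA_OBJEXTS in the low flag bits means memcg_data points
	 * at a slab object extension vector rather than directly at a
	 * mem_cgroup, i.e. this is a slab page.
	 */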
	if (memcg_data & MEMCG_DATA_OBJEXTS)
		ret += scnprintf(kbuf + ret, count - ret,
				"Slab cache page\n");

	memcg = page_memcg_check(page);
	if (!memcg)
		goto out_unlock;

	online = (memcg->css.flags & CSS_ONLINE);
	cgroup_name(memcg->css.cgroup, name, sizeof(name));
	ret += scnprintf(kbuf + ret, count - ret,
			"Charged %sto %smemcg %s\n",
			PageMemcgKmem(page) ? "(via objcg) " : "",
			online ? "" : "offline ",
			name);
out_unlock:
	rcu_read_unlock();
#endif /* CONFIG_MEMCG */

	return ret;
}

static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret, pageblock_mt, page_mt;
	char *kbuf;

	count = min_t(size_t, count, PAGE_SIZE);
	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = scnprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask, page_owner->pid,
			page_owner->tgid, page_owner->comm,
			page_owner->ts_nsec);

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfp_migratetype(page_owner->gfp_mask);
	ret += scnprintf(kbuf + ret, count - ret,
			"PFN 0x%lx type %s Block %lu type %s Flags %pGp\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			&page->flags);

	ret += stack_depot_snprint(handle, kbuf + ret, count - ret, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += scnprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
	}

	ret = print_page_owner_memcg(kbuf, count, ret, page);

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

void __dump_page_owner(const struct page *page)
{
	struct page_ext *page_ext = page_ext_get((void *)page);
	struct page_owner *page_owner;
	depot_stack_handle_t handle;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfp_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not present (never set?)\n");
		page_ext_put(page_ext);
		return;
	}

	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
		pr_alert("page_owner tracks the page as allocated\n");
	else
		pr_alert("page_owner tracks the page as freed\n");

	pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu, free_ts %llu\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
		 page_owner->pid, page_owner->tgid, page_owner->comm,
		 page_owner->ts_nsec, page_owner->free_ts_nsec);

	handle = READ_ONCE(page_owner->handle);
	if (!handle)
		pr_alert("page_owner allocation stack trace missing\n");
	else
		stack_depot_print(handle);

	handle = READ_ONCE(page_owner->free_handle);
	if (!handle) {
		pr_alert("page_owner free stack trace missing\n");
	} else {
		pr_alert("page last free pid %d tgid %d stack trace:\n",
			 page_owner->free_pid, page_owner->free_tgid);
		stack_depot_print(handle);
	}

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
	page_ext_put(page_ext);
}

static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	if (*ppos == 0)
		pfn = min_low_pfn;
	else
		pfn = *ppos;
	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * This temporary page_owner is required so
		 * that we can avoid the context switches while holding
		 * the rcu lock and copying the page owner information to
		 * user through copy_to_user() or GFP_KERNEL allocations.
		 */
		struct page_owner page_owner_tmp;

		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			if (freepage_order <= MAX_PAGE_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = page_ext_get(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			goto ext_put_continue;

		/*
		 * Although we do have the info about past allocation of free
		 * pages, it's not relevant for current memory usage.
		 */
		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
			goto ext_put_continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Don't print "tail" pages of high-order allocations as that
		 * would inflate the stats.
		 */
		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
			goto ext_put_continue;

		/*
		 * Access to page_ext->handle isn't synchronous so we should
		 * be careful to access it.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			goto ext_put_continue;

		/* Record the next PFN to read in the file offset */
		*ppos = pfn + 1;

		page_owner_tmp = *page_owner;
		page_ext_put(page_ext);
		return print_page_owner(buf, count, pfn, page,
				&page_owner_tmp, handle);
ext_put_continue:
		page_ext_put(page_ext);
	}

	return 0;
}

static loff_t lseek_page_owner(struct file *file, loff_t offset, int orig)
{
	switch (orig) {
	case SEEK_SET:
		file->f_pos = offset;
		break;
	case SEEK_CUR:
		file->f_pos += offset;
		break;
	default:
		return -EINVAL;
	}
	return file->f_pos;
}

static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones.
	 * This does not matter as the mixed block count will still be correct.
	 */
	for (; pfn < end_pfn; ) {
		unsigned long block_end_pfn;

		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = pageblock_end_pfn(pfn);
		block_end_pfn = min(block_end_pfn, end_pfn);

		for (; pfn < block_end_pfn; pfn++) {
			struct page *page = pfn_to_page(pfn);
			struct page_ext *page_ext;

			if (page_zone(page) != zone)
				continue;

			/*
			 * To avoid having to grab zone->lock, be a little
			 * careful when reading buddy page order. The only
			 * danger is that we skip too much and potentially miss
			 * some early allocated pages, which is better than
			 * heavy lock contention.
			 */
			if (PageBuddy(page)) {
				unsigned long order = buddy_order_unsafe(page);

				if (order > 0 && order <= MAX_PAGE_ORDER)
					pfn += (1UL << order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				goto ext_put_continue;

			/* Found early allocated page */
			__update_page_owner_handle(page_ext, early_handle, 0, 0,
						   -1, local_clock(), current->pid,
						   current->tgid, current->comm);
			count++;
ext_put_continue:
			page_ext_put(page_ext);
		}
		cond_resched();
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		init_pages_in_zone(pgdat, zone);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read		= read_page_owner,
	.llseek		= lseek_page_owner,
};

static void *stack_start(struct seq_file *m, loff_t *ppos)
{
	struct stack *stack;

	if (*ppos == -1UL)
		return NULL;

	if (!*ppos) {
		/*
		 * This pairs with smp_store_release() from function
		 * add_stack_record_to_list(), so we get a consistent
		 * value of stack_list.
		 */
		stack = smp_load_acquire(&stack_list);
		m->private = stack;
	} else {
		stack = m->private;
	}

	return stack;
}

static void *stack_next(struct seq_file *m, void *v, loff_t *ppos)
{
	struct stack *stack = v;

	stack = stack->next;
	*ppos = stack ? *ppos + 1 : -1UL;
	m->private = stack;

	return stack;
}

static unsigned long page_owner_pages_threshold;

static int stack_print(struct seq_file *m, void *v)
{
	int i, nr_base_pages;
	struct stack *stack = v;
	unsigned long *entries;
	unsigned long nr_entries;
	struct stack_record *stack_record = stack->stack_record;

	if (!stack->stack_record)
		return 0;

	nr_entries = stack_record->size;
	entries = stack_record->entries;
	nr_base_pages = refcount_read(&stack_record->count) - 1;

	if (nr_base_pages < 1 || nr_base_pages < page_owner_pages_threshold)
		return 0;

	for (i = 0; i < nr_entries; i++)
		seq_printf(m, " %pS\n", (void *)entries[i]);
	seq_printf(m, "nr_base_pages: %d\n\n", nr_base_pages);

	return 0;
}

static void stack_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations page_owner_stack_op = {
	.start	= stack_start,
	.next	= stack_next,
	.stop	= stack_stop,
	.show	= stack_print
};

static int page_owner_stack_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &page_owner_stack_op, 0);
}

static const struct file_operations page_owner_stack_operations = {
	.open		= page_owner_stack_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int page_owner_threshold_get(void *data, u64 *val)
{
	*val = READ_ONCE(page_owner_pages_threshold);
	return 0;
}

static int page_owner_threshold_set(void *data, u64 val)
{
	WRITE_ONCE(page_owner_pages_threshold, val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(proc_page_owner_threshold, &page_owner_threshold_get,
			&page_owner_threshold_set, "%llu");

static int __init pageowner_init(void)
{
	struct dentry *dir;

	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	debugfs_create_file("page_owner", 0400, NULL, NULL,
			    &proc_page_owner_operations);
	dir = debugfs_create_dir("page_owner_stacks", NULL);
	debugfs_create_file("show_stacks", 0400, dir, NULL,
			    &page_owner_stack_operations);
	debugfs_create_file("count_threshold", 0600, dir, NULL,
			    &proc_page_owner_threshold);

	return 0;
}
late_initcall(pageowner_init)
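
/*
 * Usage sketch, assuming CONFIG_PAGE_OWNER=y, a kernel booted with
 * "page_owner=on" and debugfs mounted at /sys/kernel/debug (the paths
 * follow from the files created in pageowner_init() above):
 *
 *	# dump owner info for every currently allocated page
 *	cat /sys/kernel/debug/page_owner > page_owner_full.txt
 *
 *	# dump unique allocation stacks with their outstanding base pages
 *	cat /sys/kernel/debug/page_owner_stacks/show_stacks
 *
 *	# only report stacks that currently hold at least 1024 base pages
 *	echo 1024 > /sys/kernel/debug/page_owner_stacks/count_threshold
 */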