// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct page *unmovable = NULL;
	struct zone *zone;
	unsigned long flags;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us. Return -EBUSY
	 */
	if (is_migrate_isolate_page(page))
		goto out;

	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
	if (!unmovable) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
		ret = 0;
	}

out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret) {
		drain_all_pages(zone);
	} else {
		WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);

		if ((isol_flags & REPORT_FAILURE) && unmovable)
			/*
			 * printk() with zone->lock held will likely trigger a
			 * lockdep splat, so defer it here.
			 */
			dump_page(unmovable, "unmovable page");
	}

	return ret;
}

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with more than pageblock_order on an isolated
	 * pageblock is restricted from merging due to the freepage counting
	 * problem, it is possible that there is a free buddy page.
	 * move_freepages_block() doesn't care about merging, so we need
	 * another approach to merge them. Isolating and freeing will make
	 * these pages be merged.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page with more than pageblock_order, there
	 * should be no free page left in the range, so we can avoid the
	 * costly pageblock scanning for freepage moving.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page) {
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(page, order);
	}
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/**
 * start_isolate_page_range() - make the page-allocation-type of a range of
 *				pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask)
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory
 *					 e.g., skip over PageHWPoison() pages
 *			REPORT_FAILURE - report details about the failure to
 *					 isolate the range
 *
 * Making the page-allocation-type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again. If the specified range includes migrate
 * types other than MOVABLE or CMA, this will fail with -EBUSY. To finally
 * isolate all pages in the range, the caller has to free all pages in the
 * range. test_pages_isolated() can be used to test it.
 *
 * There is no high level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate, and set_migratetype_isolate
 * returns an error. We then clean up by restoring the migration type on
 * pageblocks we may have modified and return -EBUSY to the caller. This
 * prevents two threads from simultaneously working on overlapping ranges.
 *
 * Return: the number of isolated pageblocks on success and -EBUSY if any part
 * of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;
	int nr_isolate_pageblock = 0;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
			nr_isolate_pageblock++;
		}
	}
	return nr_isolate_pageblock;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}

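/*
 * Illustrative sketch, not part of the original file: the two interfaces
 * above work at pageblock granularity and BUG_ON() PFNs that are not
 * pageblock aligned, so a caller starting from an arbitrary [start_pfn,
 * end_pfn) range is assumed to round it outwards first (alloc_contig_range()
 * goes even further and rounds to MAX_ORDER alignment). The helper name
 * below is hypothetical.
 */
static int __maybe_unused example_isolate_unaligned(unsigned long start_pfn,
						    unsigned long end_pfn,
						    unsigned migratetype)
{
	/* Round down/up to the pageblock boundaries enclosing the range. */
	unsigned long isolate_start = round_down(start_pfn, pageblock_nr_pages);
	unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);

	return start_isolate_page_range(isolate_start, isolate_end,
					migratetype, 0);
}
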
/*
 * Test whether all pages in the range are free (i.e. isolated) or not.
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  int flags)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot be also PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that the requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER_NR_PAGES, so chunks of free
	 * pages are not necessarily aligned to pageblock_nr_pages.
	 * Therefore we just check the migratetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}

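/*
 * Illustrative sketch, not part of the original file: how a caller such as
 * memory offlining or alloc_contig_range() is expected to string the three
 * exported interfaces together. example_empty_range() and the "migrate
 * everything away" step are hypothetical placeholders; only the
 * page-isolation calls are real, and start_pfn/end_pfn are assumed to be
 * pageblock aligned.
 */
static int __maybe_unused example_empty_range(unsigned long start_pfn,
					      unsigned long end_pfn)
{
	int ret;

	/* Mark every pageblock in the range MIGRATE_ISOLATE. */
	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				       MEMORY_OFFLINE | REPORT_FAILURE);
	if (ret < 0)
		return ret;

	/*
	 * Here the caller would migrate or free every allocated page in the
	 * range (e.g. via migrate_pages()); that step is omitted.
	 */

	/* Verify that nothing but free (isolated) pages remains. */
	ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);
	if (ret)
		/* Failure: make the pageblocks allocatable again. */
		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);

	return ret;
}
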
struct page *alloc_migrate_target(struct page *page, unsigned long private)
{
	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}
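
/*
 * Illustrative sketch, not part of the original file: alloc_migrate_target()
 * is meant to be handed to migrate_pages() as the new_page_t callback when
 * moving pages out of an isolated range. The helper name and the
 * "movable_pages" list (isolated-LRU pages collected by the caller) are
 * hypothetical.
 */
static int __maybe_unused example_migrate_away(struct list_head *movable_pages)
{
	/*
	 * migrate_pages() returns the number of pages that were not
	 * migrated, or an error code; 0 means everything moved.
	 */
	return migrate_pages(movable_pages, alloc_migrate_target, NULL, 0,
			     MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
}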