
TOMOYO Linux Cross Reference
Linux/mm/damon/paddr.c


// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/memory-tiers.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>

#include "../internal.h"
#include "ops-common.h"

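/*
 * rmap walk callback: clear the accessed bit of every PTE or PMD that maps
 * @folio in @vma, via damon_ptep_mkold()/damon_pmdp_mkold().
 */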
static bool damon_folio_mkold_one(struct folio *folio,
                struct vm_area_struct *vma, unsigned long addr, void *arg)
{
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

        while (page_vma_mapped_walk(&pvmw)) {
                addr = pvmw.address;
                if (pvmw.pte)
                        damon_ptep_mkold(pvmw.pte, vma, addr);
                else
                        damon_pmdp_mkold(pvmw.pmd, vma, addr);
        }
        return true;
}

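/*
 * Clear the accessed bits for all mappings of @folio.  Unmapped folios are
 * just marked idle.  Non-anon and KSM folios must be locked for rmap_walk();
 * if the trylock fails, the folio is silently skipped.
 */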
static void damon_folio_mkold(struct folio *folio)
{
        struct rmap_walk_control rwc = {
                .rmap_one = damon_folio_mkold_one,
                .anon_lock = folio_lock_anon_vma_read,
        };
        bool need_lock;

        if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
                folio_set_idle(folio);
                return;
        }

        need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
        if (need_lock && !folio_trylock(folio))
                return;

        rmap_walk(folio, &rwc);

        if (need_lock)
                folio_unlock(folio);
}

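/* Age (clear the accessed bits of) the folio containing physical address @paddr. */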
static void damon_pa_mkold(unsigned long paddr)
{
        struct folio *folio = damon_get_folio(PHYS_PFN(paddr));

        if (!folio)
                return;

        damon_folio_mkold(folio);
        folio_put(folio);
}

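/*
 * Pick a random sampling address within @r and age it, so that the next
 * access check can tell whether the region was accessed in between.
 */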
static void __damon_pa_prepare_access_check(struct damon_region *r)
{
        r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

        damon_pa_mkold(r->sampling_addr);
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
        struct damon_target *t;
        struct damon_region *r;

        damon_for_each_target(t, ctx) {
                damon_for_each_region(r, t)
                        __damon_pa_prepare_access_check(r);
        }
}

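/*
 * rmap walk callback: set *@arg (accessed) if any PTE or PMD mapping @folio
 * in @vma has its accessed bit set, the folio is not idle, or an MMU notifier
 * reports it young.
 */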
static bool damon_folio_young_one(struct folio *folio,
                struct vm_area_struct *vma, unsigned long addr, void *arg)
{
        bool *accessed = arg;
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

        *accessed = false;
        while (page_vma_mapped_walk(&pvmw)) {
                addr = pvmw.address;
                if (pvmw.pte) {
                        *accessed = pte_young(ptep_get(pvmw.pte)) ||
                                !folio_test_idle(folio) ||
                                mmu_notifier_test_young(vma->vm_mm, addr);
                } else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                        *accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
                                !folio_test_idle(folio) ||
                                mmu_notifier_test_young(vma->vm_mm, addr);
#else
                        WARN_ON_ONCE(1);
#endif  /* CONFIG_TRANSPARENT_HUGEPAGE */
                }
                if (*accessed) {
                        page_vma_mapped_walk_done(&pvmw);
                        break;
                }
        }

        /* If accessed, stop walking; returning false terminates the rmap walk. */
        return !*accessed;
}

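/*
 * Return whether @folio has been accessed since it was last aged.  Unmapped
 * folios are judged by the idle flag alone.
 */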
static bool damon_folio_young(struct folio *folio)
{
        bool accessed = false;
        struct rmap_walk_control rwc = {
                .arg = &accessed,
                .rmap_one = damon_folio_young_one,
                .anon_lock = folio_lock_anon_vma_read,
        };
        bool need_lock;

        if (!folio_mapped(folio) || !folio_raw_mapping(folio))
                return !folio_test_idle(folio);

        need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
        if (need_lock && !folio_trylock(folio))
                return false;

        rmap_walk(folio, &rwc);

        if (need_lock)
                folio_unlock(folio);

        return accessed;
}

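/*
 * Return whether the folio containing @paddr was accessed, and report the
 * folio's size via @folio_sz so the caller can reuse the result for the
 * whole folio.
 */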
static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
        struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
        bool accessed;

        if (!folio)
                return false;

        accessed = damon_folio_young(folio);
        *folio_sz = folio_size(folio);
        folio_put(folio);
        return accessed;
}

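/*
 * Update @r's access rate.  The last result is cached per folio: if @r's
 * sampling address falls in the folio checked last time, the cached result
 * is reused instead of checking again.
 */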
static void __damon_pa_check_access(struct damon_region *r,
                struct damon_attrs *attrs)
{
        static unsigned long last_addr;
        static unsigned long last_folio_sz = PAGE_SIZE;
        static bool last_accessed;

        /* If the region is in the last checked page, reuse the result */
        if (ALIGN_DOWN(last_addr, last_folio_sz) ==
                                ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
                damon_update_region_access_rate(r, last_accessed, attrs);
                return;
        }

        last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
        damon_update_region_access_rate(r, last_accessed, attrs);

        last_addr = r->sampling_addr;
}

static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
        struct damon_target *t;
        struct damon_region *r;
        unsigned int max_nr_accesses = 0;

        damon_for_each_target(t, ctx) {
                damon_for_each_region(r, t) {
                        __damon_pa_check_access(r, &ctx->attrs);
                        max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
                }
        }

        return max_nr_accesses;
}

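/*
 * Return whether @folio should be filtered out: test @folio against
 * @filter's type (anon, memcg, or young), then compare with the filter's
 * matching rule.  Checking DAMOS_FILTER_TYPE_YOUNG ages the folio as a
 * side effect.
 */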
static bool __damos_pa_filter_out(struct damos_filter *filter,
                struct folio *folio)
{
        bool matched = false;
        struct mem_cgroup *memcg;

        switch (filter->type) {
        case DAMOS_FILTER_TYPE_ANON:
                matched = folio_test_anon(folio);
                break;
        case DAMOS_FILTER_TYPE_MEMCG:
                rcu_read_lock();
                memcg = folio_memcg_check(folio);
                if (!memcg)
                        matched = false;
                else
                        matched = filter->memcg_id == mem_cgroup_id(memcg);
                rcu_read_unlock();
                break;
        case DAMOS_FILTER_TYPE_YOUNG:
                matched = damon_folio_young(folio);
                if (matched)
                        damon_folio_mkold(folio);
                break;
        default:
                break;
        }

        return matched == filter->matching;
}

/*
 * damos_pa_filter_out - Return true if the folio should be filtered out.
 */
static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
{
        struct damos_filter *filter;

        damos_for_each_filter(filter, scheme) {
                if (__damos_pa_filter_out(filter, folio))
                        return true;
        }
        return false;
}

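/*
 * Apply DAMOS_PAGEOUT to region @r: isolate the filtered-in folios and
 * reclaim them.  Unless the scheme already has a young-folio filter, one is
 * installed temporarily so recently accessed folios are re-checked at page
 * granularity.  Returns the number of reclaimed bytes.
 */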
static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
{
        unsigned long addr, applied;
        LIST_HEAD(folio_list);
        bool install_young_filter = true;
        struct damos_filter *filter;

        /* By default, re-check access at page granularity via a young filter. */
        damos_for_each_filter(filter, s) {
                if (filter->type == DAMOS_FILTER_TYPE_YOUNG) {
                        install_young_filter = false;
                        break;
                }
        }
        if (install_young_filter) {
                filter = damos_new_filter(DAMOS_FILTER_TYPE_YOUNG, true);
                if (!filter)
                        return 0;
                damos_add_filter(s, filter);
        }

        for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
                struct folio *folio = damon_get_folio(PHYS_PFN(addr));

                if (!folio)
                        continue;

                if (damos_pa_filter_out(s, folio))
                        goto put_folio;

                folio_clear_referenced(folio);
                folio_test_clear_young(folio);
                if (!folio_isolate_lru(folio))
                        goto put_folio;
                if (folio_test_unevictable(folio))
                        folio_putback_lru(folio);
                else
                        list_add(&folio->lru, &folio_list);
put_folio:
                folio_put(folio);
        }
        if (install_young_filter)
                damos_destroy_filter(filter);
        applied = reclaim_pages(&folio_list);
        cond_resched();
        return applied * PAGE_SIZE;
}

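/*
 * Walk region @r and either mark each filtered-in folio accessed (raising
 * its LRU priority) or deactivate it, depending on @mark_accessed.  Returns
 * the number of bytes the action was applied to.
 */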
static inline unsigned long damon_pa_mark_accessed_or_deactivate(
                struct damon_region *r, struct damos *s, bool mark_accessed)
{
        unsigned long addr, applied = 0;

        for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
                struct folio *folio = damon_get_folio(PHYS_PFN(addr));

                if (!folio)
                        continue;

                if (damos_pa_filter_out(s, folio))
                        goto put_folio;

                if (mark_accessed)
                        folio_mark_accessed(folio);
                else
                        folio_deactivate(folio);
                applied += folio_nr_pages(folio);
put_folio:
                folio_put(folio);
        }
        return applied * PAGE_SIZE;
}

static unsigned long damon_pa_mark_accessed(struct damon_region *r,
        struct damos *s)
{
        return damon_pa_mark_accessed_or_deactivate(r, s, true);
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
        struct damos *s)
{
        return damon_pa_mark_accessed_or_deactivate(r, s, false);
}

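/*
 * Migrate the folios on @migrate_folios to @target_nid using a nonblocking
 * allocation, and return the number of successfully migrated folios.
 */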
static unsigned int __damon_pa_migrate_folio_list(
                struct list_head *migrate_folios, struct pglist_data *pgdat,
                int target_nid)
{
        unsigned int nr_succeeded = 0;
        nodemask_t allowed_mask = NODE_MASK_NONE;
        struct migration_target_control mtc = {
                /*
                 * Allocate from @target_nid, or fail quickly and quietly.
                 * When this happens, the folio will likely just be discarded
                 * instead of migrated.
                 */
                .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
                        __GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
                .nid = target_nid,
                .nmask = &allowed_mask
        };

        if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE)
                return 0;

        if (list_empty(migrate_folios))
                return 0;

        /* Migration ignores all cpuset and mempolicy settings */
        migrate_pages(migrate_folios, alloc_migrate_folio, NULL,
                      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DAMON,
                      &nr_succeeded);

        return nr_succeeded;
}

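/*
 * Try to migrate every lockable folio on @folio_list (all belonging to
 * @pgdat) to @target_nid, and put the rest, including migration failures,
 * back on their LRU lists.  Returns the number of migrated folios.
 */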
static unsigned int damon_pa_migrate_folio_list(struct list_head *folio_list,
                                                struct pglist_data *pgdat,
                                                int target_nid)
{
        unsigned int nr_migrated = 0;
        struct folio *folio;
        LIST_HEAD(ret_folios);
        LIST_HEAD(migrate_folios);

        while (!list_empty(folio_list)) {
                struct folio *folio;

                cond_resched();

                folio = lru_to_folio(folio_list);
                list_del(&folio->lru);

                if (!folio_trylock(folio))
                        goto keep;

                /* Relocate its contents to another node. */
                list_add(&folio->lru, &migrate_folios);
                folio_unlock(folio);
                continue;
keep:
                list_add(&folio->lru, &ret_folios);
        }
        /* 'folio_list' is always empty here */

        /* Migrate folios selected for migration */
        nr_migrated += __damon_pa_migrate_folio_list(
                        &migrate_folios, pgdat, target_nid);
        /*
         * Folios that could not be migrated are still in @migrate_folios.
         * Add those back on @folio_list.
         */
        if (!list_empty(&migrate_folios))
                list_splice_init(&migrate_folios, folio_list);

        try_to_unmap_flush();

        list_splice(&ret_folios, folio_list);

        while (!list_empty(folio_list)) {
                folio = lru_to_folio(folio_list);
                list_del(&folio->lru);
                folio_putback_lru(folio);
        }

        return nr_migrated;
}

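/*
 * Migrate the folios on @folio_list to @target_nid, batching them by their
 * current NUMA node and with reclaim disabled via memalloc_noreclaim_save()
 * to avoid recursing into reclaim.  Returns the number of migrated folios.
 */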
static unsigned long damon_pa_migrate_pages(struct list_head *folio_list,
                                            int target_nid)
{
        int nid;
        unsigned long nr_migrated = 0;
        LIST_HEAD(node_folio_list);
        unsigned int noreclaim_flag;

        if (list_empty(folio_list))
                return nr_migrated;

        noreclaim_flag = memalloc_noreclaim_save();

        nid = folio_nid(lru_to_folio(folio_list));
        do {
                struct folio *folio = lru_to_folio(folio_list);

                if (nid == folio_nid(folio)) {
                        list_move(&folio->lru, &node_folio_list);
                        continue;
                }

                nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
                                                           NODE_DATA(nid),
                                                           target_nid);
                nid = folio_nid(lru_to_folio(folio_list));
        } while (!list_empty(folio_list));

        nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
                                                   NODE_DATA(nid),
                                                   target_nid);

        memalloc_noreclaim_restore(noreclaim_flag);

        return nr_migrated;
}

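/*
 * Apply DAMOS_MIGRATE_{HOT,COLD} to region @r: isolate the filtered-in
 * folios and migrate them to the scheme's target node.  Returns the number
 * of bytes the action was applied to.
 */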
static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s)
{
        unsigned long addr, applied;
        LIST_HEAD(folio_list);

        for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
                struct folio *folio = damon_get_folio(PHYS_PFN(addr));

                if (!folio)
                        continue;

                if (damos_pa_filter_out(s, folio))
                        goto put_folio;

                if (!folio_isolate_lru(folio))
                        goto put_folio;
                list_add(&folio->lru, &folio_list);
put_folio:
                folio_put(folio);
        }
        applied = damon_pa_migrate_pages(&folio_list, s->target_nid);
        cond_resched();
        return applied * PAGE_SIZE;
}

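/* Dispatch a DAMOS action on region @r to the paddr implementations above. */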
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
                struct damon_target *t, struct damon_region *r,
                struct damos *scheme)
{
        switch (scheme->action) {
        case DAMOS_PAGEOUT:
                return damon_pa_pageout(r, scheme);
        case DAMOS_LRU_PRIO:
                return damon_pa_mark_accessed(r, scheme);
        case DAMOS_LRU_DEPRIO:
                return damon_pa_deactivate_pages(r, scheme);
        case DAMOS_MIGRATE_HOT:
        case DAMOS_MIGRATE_COLD:
                return damon_pa_migrate(r, scheme);
        case DAMOS_STAT:
                break;
        default:
                /* DAMOS actions that are not yet supported by 'paddr'. */
                break;
        }
        return 0;
}

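/*
 * Prioritize regions for @scheme: hotter regions score higher for
 * promotion-style actions, colder regions for reclaim- and demotion-style
 * ones.
 */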
static int damon_pa_scheme_score(struct damon_ctx *context,
                struct damon_target *t, struct damon_region *r,
                struct damos *scheme)
{
        switch (scheme->action) {
        case DAMOS_PAGEOUT:
        case DAMOS_LRU_DEPRIO:
        case DAMOS_MIGRATE_COLD:
                return damon_cold_score(context, r, scheme);
        case DAMOS_LRU_PRIO:
        case DAMOS_MIGRATE_HOT:
                return damon_hot_score(context, r, scheme);
        default:
                break;
        }

        return DAMOS_MAX_SCORE;
}

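/* Register the physical address space monitoring operations with DAMON core. */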
static int __init damon_pa_initcall(void)
{
        struct damon_operations ops = {
                .id = DAMON_OPS_PADDR,
                .init = NULL,
                .update = NULL,
                .prepare_access_checks = damon_pa_prepare_access_checks,
                .check_accesses = damon_pa_check_accesses,
                .reset_aggregated = NULL,
                .target_valid = NULL,
                .cleanup = NULL,
                .apply_scheme = damon_pa_apply_scheme,
                .get_scheme_score = damon_pa_scheme_score,
        };

        return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);
