Linux/mm/damon/vaddr.c

// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "ops-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/*
 * 't->pid' should be a pointer to the relevant 'struct pid' having its
 * reference count held.  The caller must put the returned task, unless it is
 * NULL.
 */
static inline struct task_struct *damon_get_task_struct(struct damon_target *t)
{
        return get_pid_task(t->pid, PIDTYPE_PID);
}

/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
        struct task_struct *task;
        struct mm_struct *mm;

        task = damon_get_task_struct(t);
        if (!task)
                return NULL;

        mm = get_task_mm(task);
        put_task_struct(task);
        return mm;
}

/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int damon_va_evenly_split_region(struct damon_target *t,
                struct damon_region *r, unsigned int nr_pieces)
{
        unsigned long sz_orig, sz_piece, orig_end;
        struct damon_region *n = NULL, *next;
        unsigned long start;

        if (!r || !nr_pieces)
                return -EINVAL;

        orig_end = r->ar.end;
        sz_orig = damon_sz_region(r);
        sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

        if (!sz_piece)
                return -EINVAL;

        r->ar.end = r->ar.start + sz_piece;
        next = damon_next_region(r);
        for (start = r->ar.end; start + sz_piece <= orig_end;
                        start += sz_piece) {
                n = damon_new_region(start, start + sz_piece);
                if (!n)
                        return -ENOMEM;
                damon_insert_region(n, r, next, t);
                r = n;
        }
        /* complement last region for possible rounding error */
        if (n)
                n->ar.end = orig_end;

        return 0;
}

static unsigned long sz_range(struct damon_addr_range *r)
{
        return r->end - r->start;
}

/*
 * Find three regions separated by the two biggest unmapped regions
 *
 * mm           the mm_struct of the target address space
 * regions      an array of three address ranges in which the results are saved
 *
 * This function receives an address space and finds three regions in it which
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to the comments of '__damon_va_init_regions()' below to know why this
 * is necessary.
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int __damon_va_three_regions(struct mm_struct *mm,
                                       struct damon_addr_range regions[3])
{
        struct damon_addr_range first_gap = {0}, second_gap = {0};
        VMA_ITERATOR(vmi, mm, 0);
        struct vm_area_struct *vma, *prev = NULL;
        unsigned long start;

        /*
         * Find the two biggest gaps so that first_gap > second_gap > others.
         * If this is too slow, it can be optimised to examine the maple
         * tree gaps.
         */
        rcu_read_lock();
        for_each_vma(vmi, vma) {
                unsigned long gap;

                if (!prev) {
                        start = vma->vm_start;
                        goto next;
                }
                gap = vma->vm_start - prev->vm_end;

                if (gap > sz_range(&first_gap)) {
                        second_gap = first_gap;
                        first_gap.start = prev->vm_end;
                        first_gap.end = vma->vm_start;
                } else if (gap > sz_range(&second_gap)) {
                        second_gap.start = prev->vm_end;
                        second_gap.end = vma->vm_start;
                }
next:
                prev = vma;
        }
        rcu_read_unlock();

        if (!sz_range(&second_gap) || !sz_range(&first_gap))
                return -EINVAL;

        /* Sort the two biggest gaps by address */
        if (first_gap.start > second_gap.start)
                swap(first_gap, second_gap);

        /* Store the result */
        regions[0].start = ALIGN(start, DAMON_MIN_REGION);
        regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
        regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
        regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
        regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
        regions[2].end = ALIGN(prev->vm_end, DAMON_MIN_REGION);

        return 0;
}

/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
                                struct damon_addr_range regions[3])
{
        struct mm_struct *mm;
        int rc;

        mm = damon_get_mm(t);
        if (!mm)
                return -EINVAL;

        mmap_read_lock(mm);
        rc = __damon_va_three_regions(mm, regions);
        mmap_read_unlock(mm);

        mmput(mm);
        return rc;
}

/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t    the given target
 *
 * Because only a small number of portions of the entire address space are
 * actually mapped to memory and accessed, monitoring the unmapped regions is
 * wasteful.  At the same time, because we can tolerate some noise, tracking
 * every mapping is not strictly required, and could even incur high overhead
 * if the mappings change frequently or the number of mappings is high.  The
 * adaptive regions adjustment mechanism will further help to deal with the
 * noise by simply identifying the unmapped areas as regions that have no
 * access.  Moreover, applying the real mappings, which would have many
 * unmapped areas inside, would make the adaptive mechanism quite complex.
 * That said, overly huge unmapped areas inside the monitoring target should
 * be removed so that the adaptive mechanism does not spend time on them.
 *
 * For this reason, we convert the complex mappings to three distinct regions
 * that cover every mapped area of the address space.  The two gaps between
 * the three regions are the two biggest unmapped areas in the given address
 * space.  In detail, this function first identifies the start and the end of
 * the mappings and the two biggest unmapped areas of the address space.
 * Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * As the usual memory map of processes is as below, the gap between the heap
 * and the uppermost mmap()-ed region, and the gap between the lowermost
 * mmap()-ed region and the stack, will be the two biggest unmapped regions.
 * Because these gaps are exceptionally huge in usual address spaces,
 * excluding these two biggest unmapped regions is a sufficient trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
                                     struct damon_target *t)
{
        struct damon_target *ti;
        struct damon_region *r;
        struct damon_addr_range regions[3];
        unsigned long sz = 0, nr_pieces;
        int i, tidx = 0;

        if (damon_va_three_regions(t, regions)) {
                damon_for_each_target(ti, ctx) {
                        if (ti == t)
                                break;
                        tidx++;
                }
                pr_debug("Failed to get three regions of %dth target\n", tidx);
                return;
        }

        for (i = 0; i < 3; i++)
                sz += regions[i].end - regions[i].start;
        if (ctx->attrs.min_nr_regions)
                sz /= ctx->attrs.min_nr_regions;
        if (sz < DAMON_MIN_REGION)
                sz = DAMON_MIN_REGION;

        /* Set the initial three regions of the target */
        for (i = 0; i < 3; i++) {
                r = damon_new_region(regions[i].start, regions[i].end);
                if (!r) {
                        pr_err("%d'th init region creation failed\n", i);
                        return;
                }
                damon_add_region(r, t);

                nr_pieces = (regions[i].end - regions[i].start) / sz;
                damon_va_evenly_split_region(t, r, nr_pieces);
        }
}

/* Initialize '->regions_list' of every target (task) */
static void damon_va_init(struct damon_ctx *ctx)
{
        struct damon_target *t;

        damon_for_each_target(t, ctx) {
                /* the user may set the target regions as they want */
                if (!damon_nr_regions(t))
                        __damon_va_init_regions(ctx, t);
        }
}

/*
 * Update regions for current memory mappings
 */
static void damon_va_update(struct damon_ctx *ctx)
{
        struct damon_addr_range three_regions[3];
        struct damon_target *t;

        damon_for_each_target(t, ctx) {
                if (damon_va_three_regions(t, three_regions))
                        continue;
                damon_set_regions(t, three_regions, 3);
        }
}

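/*
 * Page table walk callback for clearing the accessed bit of the entry that
 * maps @addr.  Handles a transparent huge page under the PMD lock, and falls
 * back to the PTE level otherwise.
 */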
static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
                unsigned long next, struct mm_walk *walk)
{
        pte_t *pte;
        pmd_t pmde;
        spinlock_t *ptl;

        if (pmd_trans_huge(pmdp_get(pmd))) {
                ptl = pmd_lock(walk->mm, pmd);
                pmde = pmdp_get(pmd);

                if (!pmd_present(pmde)) {
                        spin_unlock(ptl);
                        return 0;
                }

                if (pmd_trans_huge(pmde)) {
                        damon_pmdp_mkold(pmd, walk->vma, addr);
                        spin_unlock(ptl);
                        return 0;
                }
                spin_unlock(ptl);
        }

        pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        if (!pte) {
                walk->action = ACTION_AGAIN;
                return 0;
        }
        if (!pte_present(ptep_get(pte)))
                goto out;
        damon_ptep_mkold(pte, walk->vma, addr);
out:
        pte_unmap_unlock(pte, ptl);
        return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
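/*
 * Clear the accessed bit of the HugeTLB entry that maps @addr and mark the
 * backing folio as idle, so that the following access check can tell whether
 * the page has been touched again.
 */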
static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
                                struct vm_area_struct *vma, unsigned long addr)
{
        bool referenced = false;
        pte_t entry = huge_ptep_get(mm, addr, pte);
        struct folio *folio = pfn_folio(pte_pfn(entry));
        unsigned long psize = huge_page_size(hstate_vma(vma));

        folio_get(folio);

        if (pte_young(entry)) {
                referenced = true;
                entry = pte_mkold(entry);
                set_huge_pte_at(mm, addr, pte, entry, psize);
        }

#ifdef CONFIG_MMU_NOTIFIER
        if (mmu_notifier_clear_young(mm, addr,
                                     addr + huge_page_size(hstate_vma(vma))))
                referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

        if (referenced)
                folio_set_young(folio);

        folio_set_idle(folio);
        folio_put(folio);
}

static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                     unsigned long addr, unsigned long end,
                                     struct mm_walk *walk)
{
        struct hstate *h = hstate_vma(walk->vma);
        spinlock_t *ptl;
        pte_t entry;

        ptl = huge_pte_lock(h, walk->mm, pte);
        entry = huge_ptep_get(walk->mm, addr, pte);
        if (!pte_present(entry))
                goto out;

        damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr);

out:
        spin_unlock(ptl);
        return 0;
}
#else
#define damon_mkold_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_mkold_ops = {
        .pmd_entry = damon_mkold_pmd_entry,
        .hugetlb_entry = damon_mkold_hugetlb_entry,
        .walk_lock = PGWALK_RDLOCK,
};

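/*
 * Clear the accessed bit of the page table entry mapping @addr in @mm, so
 * that a later damon_va_young() call can tell whether the address has been
 * accessed in the meantime.
 */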
static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
        mmap_read_lock(mm);
        walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
        mmap_read_unlock(mm);
}

/*
 * Functions for the access checking of the regions
 */

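/*
 * Prepare the access check of a region by picking a random sampling address
 * in the region and clearing the accessed bit of the page mapping it.
 */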
static void __damon_va_prepare_access_check(struct mm_struct *mm,
                                        struct damon_region *r)
{
        r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

        damon_va_mkold(mm, r->sampling_addr);
}

static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
        struct damon_target *t;
        struct mm_struct *mm;
        struct damon_region *r;

        damon_for_each_target(t, ctx) {
                mm = damon_get_mm(t);
                if (!mm)
                        continue;
                damon_for_each_region(r, t)
                        __damon_va_prepare_access_check(mm, r);
                mmput(mm);
        }
}

struct damon_young_walk_private {
        /* size of the folio for the access checked virtual memory address */
        unsigned long *folio_sz;
        bool young;
};

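/*
 * Page table walk callback for checking whether the entry that maps @addr has
 * been accessed.  An entry is considered young if its accessed bit is set, if
 * the backing folio is not idle, or if an MMU notifier reports it as young.
 */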
static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
                unsigned long next, struct mm_walk *walk)
{
        pte_t *pte;
        pte_t ptent;
        spinlock_t *ptl;
        struct folio *folio;
        struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        if (pmd_trans_huge(pmdp_get(pmd))) {
                pmd_t pmde;

                ptl = pmd_lock(walk->mm, pmd);
                pmde = pmdp_get(pmd);

                if (!pmd_present(pmde)) {
                        spin_unlock(ptl);
                        return 0;
                }

                if (!pmd_trans_huge(pmde)) {
                        spin_unlock(ptl);
                        goto regular_page;
                }
                folio = damon_get_folio(pmd_pfn(pmde));
                if (!folio)
                        goto huge_out;
                if (pmd_young(pmde) || !folio_test_idle(folio) ||
                                        mmu_notifier_test_young(walk->mm,
                                                addr))
                        priv->young = true;
                *priv->folio_sz = HPAGE_PMD_SIZE;
                folio_put(folio);
huge_out:
                spin_unlock(ptl);
                return 0;
        }

regular_page:
#endif  /* CONFIG_TRANSPARENT_HUGEPAGE */

        pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        if (!pte) {
                walk->action = ACTION_AGAIN;
                return 0;
        }
        ptent = ptep_get(pte);
        if (!pte_present(ptent))
                goto out;
        folio = damon_get_folio(pte_pfn(ptent));
        if (!folio)
                goto out;
        if (pte_young(ptent) || !folio_test_idle(folio) ||
                        mmu_notifier_test_young(walk->mm, addr))
                priv->young = true;
        *priv->folio_sz = folio_size(folio);
        folio_put(folio);
out:
        pte_unmap_unlock(pte, ptl);
        return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
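/*
 * HugeTLB version of the young check: tests the accessed bit, the folio idle
 * flag, and the MMU notifiers for the HugeTLB entry that maps @addr.
 */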
static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                     unsigned long addr, unsigned long end,
                                     struct mm_walk *walk)
{
        struct damon_young_walk_private *priv = walk->private;
        struct hstate *h = hstate_vma(walk->vma);
        struct folio *folio;
        spinlock_t *ptl;
        pte_t entry;

        ptl = huge_pte_lock(h, walk->mm, pte);
        entry = huge_ptep_get(walk->mm, addr, pte);
        if (!pte_present(entry))
                goto out;

        folio = pfn_folio(pte_pfn(entry));
        folio_get(folio);

        if (pte_young(entry) || !folio_test_idle(folio) ||
            mmu_notifier_test_young(walk->mm, addr))
                priv->young = true;
        *priv->folio_sz = huge_page_size(h);

        folio_put(folio);

out:
        spin_unlock(ptl);
        return 0;
}
#else
#define damon_young_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_young_ops = {
        .pmd_entry = damon_young_pmd_entry,
        .hugetlb_entry = damon_young_hugetlb_entry,
        .walk_lock = PGWALK_RDLOCK,
};

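/*
 * Check whether the page mapping @addr in @mm was accessed since the last
 * damon_va_mkold() call.  The size of the folio backing the address is
 * reported via @folio_sz.
 */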
static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
                unsigned long *folio_sz)
{
        struct damon_young_walk_private arg = {
                .folio_sz = folio_sz,
                .young = false,
        };

        mmap_read_lock(mm);
        walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
        mmap_read_unlock(mm);
        return arg.young;
}

/*
 * Check whether the region was accessed after the last preparation
 *
 * mm   'mm_struct' for the given virtual address space
 * r    the region to be checked
 */
static void __damon_va_check_access(struct mm_struct *mm,
                                struct damon_region *r, bool same_target,
                                struct damon_attrs *attrs)
{
        static unsigned long last_addr;
        static unsigned long last_folio_sz = PAGE_SIZE;
        static bool last_accessed;

        if (!mm) {
                damon_update_region_access_rate(r, false, attrs);
                return;
        }

        /* If the region is in the last checked page, reuse the result */
        if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
                                ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
                damon_update_region_access_rate(r, last_accessed, attrs);
                return;
        }

        last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
        damon_update_region_access_rate(r, last_accessed, attrs);

        last_addr = r->sampling_addr;
}

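/*
 * Check the access status of every region of every target and update the
 * regions' access rates accordingly.  Returns the maximum 'nr_accesses' among
 * all regions.
 */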
static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
        struct damon_target *t;
        struct mm_struct *mm;
        struct damon_region *r;
        unsigned int max_nr_accesses = 0;
        bool same_target;

        damon_for_each_target(t, ctx) {
                mm = damon_get_mm(t);
                same_target = false;
                damon_for_each_region(r, t) {
                        __damon_va_check_access(mm, r, same_target,
                                        &ctx->attrs);
                        max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
                        same_target = true;
                }
                if (mm)
                        mmput(mm);
        }

        return max_nr_accesses;
}

/*
 * Functions for the target validity check and cleanup
 */

static bool damon_va_target_valid(struct damon_target *t)
{
        struct task_struct *task;

        task = damon_get_task_struct(t);
        if (task) {
                put_task_struct(task);
                return true;
        }

        return false;
}

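/*
 * Apply the given madvise() behavior to the region of the target via
 * do_madvise(), and return the number of bytes the behavior was applied to.
 * Returns 0 when the advice could not be applied, or when the kernel is built
 * without CONFIG_ADVISE_SYSCALLS.
 */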
#ifndef CONFIG_ADVISE_SYSCALLS
static unsigned long damos_madvise(struct damon_target *target,
                struct damon_region *r, int behavior)
{
        return 0;
}
#else
static unsigned long damos_madvise(struct damon_target *target,
                struct damon_region *r, int behavior)
{
        struct mm_struct *mm;
        unsigned long start = PAGE_ALIGN(r->ar.start);
        unsigned long len = PAGE_ALIGN(damon_sz_region(r));
        unsigned long applied;

        mm = damon_get_mm(target);
        if (!mm)
                return 0;

        applied = do_madvise(mm, start, len, behavior) ? 0 : len;
        mmput(mm);

        return applied;
}
#endif  /* CONFIG_ADVISE_SYSCALLS */

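/*
 * Apply the given DAMOS scheme action to the region by translating the action
 * into the corresponding madvise() behavior.  Returns the number of bytes the
 * action was applied to.
 */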
static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
                struct damon_target *t, struct damon_region *r,
                struct damos *scheme)
{
        int madv_action;

        switch (scheme->action) {
        case DAMOS_WILLNEED:
                madv_action = MADV_WILLNEED;
                break;
        case DAMOS_COLD:
                madv_action = MADV_COLD;
                break;
        case DAMOS_PAGEOUT:
                madv_action = MADV_PAGEOUT;
                break;
        case DAMOS_HUGEPAGE:
                madv_action = MADV_HUGEPAGE;
                break;
        case DAMOS_NOHUGEPAGE:
                madv_action = MADV_NOHUGEPAGE;
                break;
        case DAMOS_STAT:
                return 0;
        default:
                /*
                 * DAMOS actions that are not yet supported by 'vaddr'.
                 */
                return 0;
        }

        return damos_madvise(t, r, madv_action);
}

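/*
 * Prioritize a region for the given scheme action.  Only DAMOS_PAGEOUT uses
 * the coldness-based score; other actions get the maximum score.
 */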
static int damon_va_scheme_score(struct damon_ctx *context,
                struct damon_target *t, struct damon_region *r,
                struct damos *scheme)
{

        switch (scheme->action) {
        case DAMOS_PAGEOUT:
                return damon_cold_score(context, r, scheme);
        default:
                break;
        }

        return DAMOS_MAX_SCORE;
}

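/* Register the 'vaddr' and 'fvaddr' DAMON operations sets */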
static int __init damon_va_initcall(void)
{
        struct damon_operations ops = {
                .id = DAMON_OPS_VADDR,
                .init = damon_va_init,
                .update = damon_va_update,
                .prepare_access_checks = damon_va_prepare_access_checks,
                .check_accesses = damon_va_check_accesses,
                .reset_aggregated = NULL,
                .target_valid = damon_va_target_valid,
                .cleanup = NULL,
                .apply_scheme = damon_va_apply_scheme,
                .get_scheme_score = damon_va_scheme_score,
        };
        /* ops for fixed virtual address ranges */
        struct damon_operations ops_fvaddr = ops;
        int err;

        /* Don't set the monitoring target regions for the entire mapping */
        ops_fvaddr.id = DAMON_OPS_FVADDR;
        ops_fvaddr.init = NULL;
        ops_fvaddr.update = NULL;

        err = damon_register_ops(&ops);
        if (err)
                return err;
        return damon_register_ops(&ops_fvaddr);
};

subsys_initcall(damon_va_initcall);

#include "vaddr-test.h"