
TOMOYO Linux Cross Reference
Linux/mm/damon/vaddr.c


Diff markup

Differences between /mm/damon/vaddr.c (Version linux-6.12-rc7) and /mm/damon/vaddr.c (Version linux-4.18.20). The file does not exist in linux-4.18.20, so the entire listing below is new in the newer version.


// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "ops-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/*
 * 't->pid' should be the pointer to the relevant 'struct pid' having reference
 * count.  Caller must put the returned task, unless it is NULL.
 */
static inline struct task_struct *damon_get_task_struct(struct damon_target *t)
{
	return get_pid_task(t->pid, PIDTYPE_PID);
}

/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = damon_get_task_struct(t);
	if (!task)
		return NULL;

	mm = get_task_mm(task);
	put_task_struct(task);
	return mm;
}

/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int damon_va_evenly_split_region(struct damon_target *t,
		struct damon_region *r, unsigned int nr_pieces)
{
	unsigned long sz_orig, sz_piece, orig_end;
	struct damon_region *n = NULL, *next;
	unsigned long start;

	if (!r || !nr_pieces)
		return -EINVAL;

	orig_end = r->ar.end;
	sz_orig = damon_sz_region(r);
	sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

	if (!sz_piece)
		return -EINVAL;

	r->ar.end = r->ar.start + sz_piece;
	next = damon_next_region(r);
	for (start = r->ar.end; start + sz_piece <= orig_end;
			start += sz_piece) {
		n = damon_new_region(start, start + sz_piece);
		if (!n)
			return -ENOMEM;
		damon_insert_region(n, r, next, t);
		r = n;
	}
	/* complement last region for possible rounding error */
	if (n)
		n->ar.end = orig_end;

	return 0;
}

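/*
 * Illustrative note (not part of the upstream source): assuming
 * DAMON_MIN_REGION == 4096, splitting a region [0, 40960) into nr_pieces == 3
 * gives sz_piece = ALIGN_DOWN(40960 / 3, 4096) = 12288.  The first region is
 * trimmed to [0, 12288), the loop inserts [12288, 24576) and [24576, 36864),
 * and the last inserted region is then extended to the original end, so the
 * final layout is [0, 12288), [12288, 24576), [24576, 40960).
 */
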
static unsigned long sz_range(struct damon_addr_range *r)
{
	return r->end - r->start;
}

/*
 * Find three regions separated by two biggest unmapped regions
 *
 * vma		the head vma of the target address space
 * regions	an array of three address ranges that results will be saved
 *
 * This function receives an address space and finds three regions in it which
 * separated by the two biggest unmapped regions in the space.  Please refer to
 * below comments of '__damon_va_init_regions()' function to know why this is
 * necessary.
 *
 * Returns 0 if success, or negative error code otherwise.
 */
static int __damon_va_three_regions(struct mm_struct *mm,
				       struct damon_addr_range regions[3])
{
	struct damon_addr_range first_gap = {0}, second_gap = {0};
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma, *prev = NULL;
	unsigned long start;

	/*
	 * Find the two biggest gaps so that first_gap > second_gap > others.
	 * If this is too slow, it can be optimised to examine the maple
	 * tree gaps.
	 */
	rcu_read_lock();
	for_each_vma(vmi, vma) {
		unsigned long gap;

		if (!prev) {
			start = vma->vm_start;
			goto next;
		}
		gap = vma->vm_start - prev->vm_end;

		if (gap > sz_range(&first_gap)) {
			second_gap = first_gap;
			first_gap.start = prev->vm_end;
			first_gap.end = vma->vm_start;
		} else if (gap > sz_range(&second_gap)) {
			second_gap.start = prev->vm_end;
			second_gap.end = vma->vm_start;
		}
next:
		prev = vma;
	}
	rcu_read_unlock();

	if (!sz_range(&second_gap) || !sz_range(&first_gap))
		return -EINVAL;

	/* Sort the two biggest gaps by address */
	if (first_gap.start > second_gap.start)
		swap(first_gap, second_gap);

	/* Store the result */
	regions[0].start = ALIGN(start, DAMON_MIN_REGION);
	regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
	regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
	regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
	regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
	regions[2].end = ALIGN(prev->vm_end, DAMON_MIN_REGION);

	return 0;
}

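/*
 * Illustrative note (not part of the upstream source): for a layout with a
 * heap at [0x1000000, 0x2000000), a mapped library at [0x7f0000000000,
 * 0x7f0000100000), and a stack at [0x7ffffff00000, 0x7ffffffff000), the two
 * biggest gaps are heap<->library and library<->stack, so the three resulting
 * regions are the heap, the library, and the stack, each rounded to
 * DAMON_MIN_REGION boundaries.
 */
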
/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
				struct damon_addr_range regions[3])
{
	struct mm_struct *mm;
	int rc;

	mm = damon_get_mm(t);
	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	rc = __damon_va_three_regions(mm, regions);
	mmap_read_unlock(mm);

	mmput(mm);
	return rc;
}

/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t	the given target
 *
 * Because only a number of small portions of the entire address space
 * is actually mapped to the memory and accessed, monitoring the unmapped
 * regions is wasteful.  That said, because we can deal with small noises,
 * tracking every mapping is not strictly required but could even incur a high
 * overhead if the mapping frequently changes or the number of mappings is
 * high.  The adaptive regions adjustment mechanism will further help to deal
 * with the noise by simply identifying the unmapped areas as a region that
 * has no access.  Moreover, applying the real mappings that would have many
 * unmapped areas inside will make the adaptive mechanism quite complex.  That
 * said, too huge unmapped areas inside the monitoring target should be removed
 * to not take the time for the adaptive mechanism.
 *
 * For the reason, we convert the complex mappings to three distinct regions
 * that cover every mapped area of the address space.  Also the two gaps
 * between the three regions are the two biggest unmapped areas in the given
 * address space.  In detail, this function first identifies the start and the
 * end of the mappings and the two biggest unmapped areas of the address space.
 * Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * As usual memory map of processes is as below, the gap between the heap and
 * the uppermost mmap()-ed region, and the gap between the lowermost mmap()-ed
 * region and the stack will be two biggest unmapped regions.  Because these
 * gaps are exceptionally huge areas in usual address space, excluding these
 * two biggest unmapped regions will be sufficient to make a trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
				     struct damon_target *t)
{
	struct damon_target *ti;
	struct damon_region *r;
	struct damon_addr_range regions[3];
	unsigned long sz = 0, nr_pieces;
	int i, tidx = 0;

	if (damon_va_three_regions(t, regions)) {
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			tidx++;
		}
		pr_debug("Failed to get three regions of %dth target\n", tidx);
		return;
	}

	for (i = 0; i < 3; i++)
		sz += regions[i].end - regions[i].start;
	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	/* Set the initial three regions of the target */
	for (i = 0; i < 3; i++) {
		r = damon_new_region(regions[i].start, regions[i].end);
		if (!r) {
			pr_err("%d'th init region creation failed\n", i);
			return;
		}
		damon_add_region(r, t);

		nr_pieces = (regions[i].end - regions[i].start) / sz;
		damon_va_evenly_split_region(t, r, nr_pieces);
	}
}

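/*
 * Illustrative note (not part of the upstream source): if the three initial
 * regions cover 100 MiB, 200 MiB and 300 MiB and ctx->attrs.min_nr_regions is
 * 10, 'sz' becomes 60 MiB, so the regions are split into 1, 3 and 5 pieces
 * respectively, giving 9 initial regions in total.
 */
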
/* Initialize '->regions_list' of every target (task) */
static void damon_va_init(struct damon_ctx *ctx)
{
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* the user may set the target regions as they want */
		if (!damon_nr_regions(t))
			__damon_va_init_regions(ctx, t);
	}
}

/*
 * Update regions for current memory mappings
 */
static void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		if (damon_va_three_regions(t, three_regions))
			continue;
		damon_set_regions(t, three_regions, 3);
	}
}

static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	pmd_t pmde;
	spinlock_t *ptl;

	if (pmd_trans_huge(pmdp_get(pmd))) {
		ptl = pmd_lock(walk->mm, pmd);
		pmde = pmdp_get(pmd);

		if (!pmd_present(pmde)) {
			spin_unlock(ptl);
			return 0;
		}

		if (pmd_trans_huge(pmde)) {
			damon_pmdp_mkold(pmd, walk->vma, addr);
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	if (!pte_present(ptep_get(pte)))
		goto out;
	damon_ptep_mkold(pte, walk->vma, addr);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
				struct vm_area_struct *vma, unsigned long addr)
{
	bool referenced = false;
	pte_t entry = huge_ptep_get(mm, addr, pte);
	struct folio *folio = pfn_folio(pte_pfn(entry));
	unsigned long psize = huge_page_size(hstate_vma(vma));

	folio_get(folio);

	if (pte_young(entry)) {
		referenced = true;
		entry = pte_mkold(entry);
		set_huge_pte_at(mm, addr, pte, entry, psize);
	}

#ifdef CONFIG_MMU_NOTIFIER
	if (mmu_notifier_clear_young(mm, addr,
				     addr + huge_page_size(hstate_vma(vma))))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		folio_set_young(folio);

	folio_set_idle(folio);
	folio_put(folio);
}

static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(walk->vma);
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(walk->mm, addr, pte);
	if (!pte_present(entry))
		goto out;

	damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_mkold_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
	.hugetlb_entry = damon_mkold_hugetlb_entry,
	.walk_lock = PGWALK_RDLOCK,
};

static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}

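/*
 * Illustrative note (not part of the upstream source): the [addr, addr + 1)
 * range makes walk_page_range() visit only the page (or huge page) mapping
 * the sampling address, so each preparation touches a single page table entry
 * per region instead of scanning the whole region.
 */
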
/*
 * Functions for the access checking of the regions
 */

static void __damon_va_prepare_access_check(struct mm_struct *mm,
					struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_va_mkold(mm, r->sampling_addr);
}

static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t)
			__damon_va_prepare_access_check(mm, r);
		mmput(mm);
	}
}

struct damon_young_walk_private {
	/* size of the folio for the access checked virtual memory address */
	unsigned long *folio_sz;
	bool young;
};

static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	pte_t ptent;
	spinlock_t *ptl;
	struct folio *folio;
	struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(pmdp_get(pmd))) {
		pmd_t pmde;

		ptl = pmd_lock(walk->mm, pmd);
		pmde = pmdp_get(pmd);

		if (!pmd_present(pmde)) {
			spin_unlock(ptl);
			return 0;
		}

		if (!pmd_trans_huge(pmde)) {
			spin_unlock(ptl);
			goto regular_page;
		}
		folio = damon_get_folio(pmd_pfn(pmde));
		if (!folio)
			goto huge_out;
		if (pmd_young(pmde) || !folio_test_idle(folio) ||
					mmu_notifier_test_young(walk->mm,
						addr))
			priv->young = true;
		*priv->folio_sz = HPAGE_PMD_SIZE;
		folio_put(folio);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	ptent = ptep_get(pte);
	if (!pte_present(ptent))
		goto out;
	folio = damon_get_folio(pte_pfn(ptent));
	if (!folio)
		goto out;
	if (pte_young(ptent) || !folio_test_idle(folio) ||
			mmu_notifier_test_young(walk->mm, addr))
		priv->young = true;
	*priv->folio_sz = folio_size(folio);
	folio_put(folio);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct damon_young_walk_private *priv = walk->private;
	struct hstate *h = hstate_vma(walk->vma);
	struct folio *folio;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(walk->mm, addr, pte);
	if (!pte_present(entry))
		goto out;

	folio = pfn_folio(pte_pfn(entry));
	folio_get(folio);

	if (pte_young(entry) || !folio_test_idle(folio) ||
	    mmu_notifier_test_young(walk->mm, addr))
		priv->young = true;
	*priv->folio_sz = huge_page_size(h);

	folio_put(folio);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_young_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
	.hugetlb_entry = damon_young_hugetlb_entry,
	.walk_lock = PGWALK_RDLOCK,
};

static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
		unsigned long *folio_sz)
{
	struct damon_young_walk_private arg = {
		.folio_sz = folio_sz,
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}

/*
 * Check whether the region was accessed after the last preparation
 *
 * mm	'mm_struct' for the given virtual address space
 * r	the region to be checked
 */
static void __damon_va_check_access(struct mm_struct *mm,
				struct damon_region *r, bool same_target,
				struct damon_attrs *attrs)
{
	static unsigned long last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;

	if (!mm) {
		damon_update_region_access_rate(r, false, attrs);
		return;
	}

	/* If the region is in the last checked page, reuse the result */
	if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
		damon_update_region_access_rate(r, last_accessed, attrs);
		return;
	}

	last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
	damon_update_region_access_rate(r, last_accessed, attrs);

	last_addr = r->sampling_addr;
}

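/*
 * Illustrative note (not part of the upstream source): the static last_addr,
 * last_folio_sz and last_accessed variables cache the previous check's
 * result, so when consecutive regions of the same target sampled addresses
 * falling in the same (possibly huge) page, the later checks reuse the cached
 * result instead of walking the page table again.
 */
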
static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;
	bool same_target;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		same_target = false;
		damon_for_each_region(r, t) {
			__damon_va_check_access(mm, r, same_target,
					&ctx->attrs);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
			same_target = true;
		}
		if (mm)
			mmput(mm);
	}

	return max_nr_accesses;
}

/*
 * Functions for the target validity check and cleanup
 */

static bool damon_va_target_valid(struct damon_target *t)
{
	struct task_struct *task;

	task = damon_get_task_struct(t);
	if (task) {
		put_task_struct(task);
		return true;
	}

	return false;
}

#ifndef CONFIG_ADVISE_SYSCALLS
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	return 0;
}
#else
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	struct mm_struct *mm;
	unsigned long start = PAGE_ALIGN(r->ar.start);
	unsigned long len = PAGE_ALIGN(damon_sz_region(r));
	unsigned long applied;

	mm = damon_get_mm(target);
	if (!mm)
		return 0;

	applied = do_madvise(mm, start, len, behavior) ? 0 : len;
	mmput(mm);

	return applied;
}
#endif	/* CONFIG_ADVISE_SYSCALLS */

static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	int madv_action;

	switch (scheme->action) {
	case DAMOS_WILLNEED:
		madv_action = MADV_WILLNEED;
		break;
	case DAMOS_COLD:
		madv_action = MADV_COLD;
		break;
	case DAMOS_PAGEOUT:
		madv_action = MADV_PAGEOUT;
		break;
	case DAMOS_HUGEPAGE:
		madv_action = MADV_HUGEPAGE;
		break;
	case DAMOS_NOHUGEPAGE:
		madv_action = MADV_NOHUGEPAGE;
		break;
	case DAMOS_STAT:
		return 0;
	default:
		/*
		 * DAMOS actions that are not yet supported by 'vaddr'.
		 */
		return 0;
	}

	return damos_madvise(t, r, madv_action);
}

static int damon_va_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{

	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

static int __init damon_va_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_VADDR,
		.init = damon_va_init,
		.update = damon_va_update,
		.prepare_access_checks = damon_va_prepare_access_checks,
		.check_accesses = damon_va_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = damon_va_target_valid,
		.cleanup = NULL,
		.apply_scheme = damon_va_apply_scheme,
		.get_scheme_score = damon_va_scheme_score,
	};
	/* ops for fixed virtual address ranges */
	struct damon_operations ops_fvaddr = ops;
	int err;

	/* Don't set the monitoring target regions for the entire mapping */
	ops_fvaddr.id = DAMON_OPS_FVADDR;
	ops_fvaddr.init = NULL;
	ops_fvaddr.update = NULL;

	err = damon_register_ops(&ops);
	if (err)
		return err;
	return damon_register_ops(&ops_fvaddr);
};

subsys_initcall(damon_va_initcall);

#include "tests/vaddr-kunit.h"
