
TOMOYO Linux Cross Reference
Linux/mm/damon/vaddr.c


Diff markup

Differences between /mm/damon/vaddr.c (Version linux-6.12-rc7) and /mm/damon/vaddr.c (Version linux-5.16.20)


  1 // SPDX-License-Identifier: GPL-2.0                 1 // SPDX-License-Identifier: GPL-2.0
  2 /*                                                  2 /*
  3  * DAMON Primitives for Virtual Address Spaces      3  * DAMON Primitives for Virtual Address Spaces
  4  *                                                  4  *
  5  * Author: SeongJae Park <sj@kernel.org>       !!   5  * Author: SeongJae Park <sjpark@amazon.de>
  6  */                                                 6  */
  7                                                     7 
  8 #define pr_fmt(fmt) "damon-va: " fmt                8 #define pr_fmt(fmt) "damon-va: " fmt
  9                                                     9 
                                                   >>  10 #include <asm-generic/mman-common.h>
 10 #include <linux/highmem.h>                         11 #include <linux/highmem.h>
 11 #include <linux/hugetlb.h>                         12 #include <linux/hugetlb.h>
 12 #include <linux/mman.h>                        << 
 13 #include <linux/mmu_notifier.h>                    13 #include <linux/mmu_notifier.h>
 14 #include <linux/page_idle.h>                       14 #include <linux/page_idle.h>
 15 #include <linux/pagewalk.h>                        15 #include <linux/pagewalk.h>
 16 #include <linux/sched/mm.h>                        16 #include <linux/sched/mm.h>
 17                                                    17 
 18 #include "ops-common.h"                        !!  18 #include "prmtv-common.h"
 19                                                    19 
 20 #ifdef CONFIG_DAMON_VADDR_KUNIT_TEST               20 #ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
 21 #undef DAMON_MIN_REGION                            21 #undef DAMON_MIN_REGION
 22 #define DAMON_MIN_REGION 1                         22 #define DAMON_MIN_REGION 1
 23 #endif                                             23 #endif
 24                                                    24 
 25 /*                                                 25 /*
 26  * 't->pid' should be the pointer to the relev !!  26  * 't->id' should be the pointer to the relevant 'struct pid' having reference
 27  * count.  Caller must put the returned task,      27  * count.  Caller must put the returned task, unless it is NULL.
 28  */                                                28  */
 29 static inline struct task_struct *damon_get_ta !!  29 #define damon_get_task_struct(t) \
 30 {                                              !!  30         (get_pid_task((struct pid *)t->id, PIDTYPE_PID))
 31         return get_pid_task(t->pid, PIDTYPE_PI << 
 32 }                                              << 
 33                                                    31 
 34 /*                                                 32 /*
 35  * Get the mm_struct of the given target           33  * Get the mm_struct of the given target
 36  *                                                 34  *
 37  * Caller _must_ put the mm_struct after use,      35  * Caller _must_ put the mm_struct after use, unless it is NULL.
 38  *                                                 36  *
 39  * Returns the mm_struct of the target on succ     37  * Returns the mm_struct of the target on success, NULL on failure
 40  */                                                38  */
 41 static struct mm_struct *damon_get_mm(struct d     39 static struct mm_struct *damon_get_mm(struct damon_target *t)
 42 {                                                  40 {
 43         struct task_struct *task;                  41         struct task_struct *task;
 44         struct mm_struct *mm;                      42         struct mm_struct *mm;
 45                                                    43 
 46         task = damon_get_task_struct(t);           44         task = damon_get_task_struct(t);
 47         if (!task)                                 45         if (!task)
 48                 return NULL;                       46                 return NULL;
 49                                                    47 
 50         mm = get_task_mm(task);                    48         mm = get_task_mm(task);
 51         put_task_struct(task);                     49         put_task_struct(task);
 52         return mm;                                 50         return mm;
 53 }                                                  51 }
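
The reference-counting contract above is easy to get wrong, so here is a hypothetical caller-side sketch (this function is not part of the kernel source; it only uses APIs that appear elsewhere in this file):

/* Hypothetical caller of damon_get_mm(): a non-NULL mm_struct carries a
 * reference and must be released with mmput() after use. */
static int example_inspect_target(struct damon_target *t)
{
        struct mm_struct *mm = damon_get_mm(t);

        if (!mm)
                return -EINVAL; /* target has no mm, e.g. it already exited */

        mmap_read_lock(mm);
        /* ... walk or inspect the address space here ... */
        mmap_read_unlock(mm);

        mmput(mm);              /* pair with the reference damon_get_mm() took */
        return 0;
}
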
 54                                                    52 
 55 /*                                                 53 /*
 56  * Functions for the initial monitoring target     54  * Functions for the initial monitoring target regions construction
 57  */                                                55  */
 58                                                    56 
 59 /*                                                 57 /*
 60  * Size-evenly split a region into 'nr_pieces'     58  * Size-evenly split a region into 'nr_pieces' small regions
 61  *                                                 59  *
 62  * Returns 0 on success, or negative error cod     60  * Returns 0 on success, or negative error code otherwise.
 63  */                                                61  */
 64 static int damon_va_evenly_split_region(struct     62 static int damon_va_evenly_split_region(struct damon_target *t,
 65                 struct damon_region *r, unsign     63                 struct damon_region *r, unsigned int nr_pieces)
 66 {                                                  64 {
 67         unsigned long sz_orig, sz_piece, orig_     65         unsigned long sz_orig, sz_piece, orig_end;
 68         struct damon_region *n = NULL, *next;      66         struct damon_region *n = NULL, *next;
 69         unsigned long start;                       67         unsigned long start;
 70                                                    68 
 71         if (!r || !nr_pieces)                      69         if (!r || !nr_pieces)
 72                 return -EINVAL;                    70                 return -EINVAL;
 73                                                    71 
 74         orig_end = r->ar.end;                      72         orig_end = r->ar.end;
 75         sz_orig = damon_sz_region(r);          !!  73         sz_orig = r->ar.end - r->ar.start;
 76         sz_piece = ALIGN_DOWN(sz_orig / nr_pie     74         sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);
 77                                                    75 
 78         if (!sz_piece)                             76         if (!sz_piece)
 79                 return -EINVAL;                    77                 return -EINVAL;
 80                                                    78 
 81         r->ar.end = r->ar.start + sz_piece;        79         r->ar.end = r->ar.start + sz_piece;
 82         next = damon_next_region(r);               80         next = damon_next_region(r);
 83         for (start = r->ar.end; start + sz_pie     81         for (start = r->ar.end; start + sz_piece <= orig_end;
 84                         start += sz_piece) {       82                         start += sz_piece) {
 85                 n = damon_new_region(start, st     83                 n = damon_new_region(start, start + sz_piece);
 86                 if (!n)                            84                 if (!n)
 87                         return -ENOMEM;            85                         return -ENOMEM;
 88                 damon_insert_region(n, r, next     86                 damon_insert_region(n, r, next, t);
 89                 r = n;                             87                 r = n;
 90         }                                          88         }
 91         /* complement last region for possible     89         /* complement last region for possible rounding error */
 92         if (n)                                     90         if (n)
 93                 n->ar.end = orig_end;              91                 n->ar.end = orig_end;
 94                                                    92 
 95         return 0;                                  93         return 0;
 96 }                                                  94 }
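
As a standalone worked example of the piece-size arithmetic above (illustrative user-space code; the ALIGN_DOWN() macro assumes a power-of-two alignment, which holds for DAMON_MIN_REGION, and the 4096 value is only an assumption for the example):

#include <stdio.h>

#define DAMON_MIN_REGION 4096UL                 /* illustrative value */
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))     /* power-of-two 'a' only */

int main(void)
{
        unsigned long start = 0x100000, end = 0x10a000; /* 40 KiB region */
        unsigned long nr_pieces = 3;
        unsigned long sz_piece = ALIGN_DOWN((end - start) / nr_pieces,
                                            DAMON_MIN_REGION);

        /* 40960 / 3 = 13653, aligned down to 12288: the region is carved
         * into 12 KiB + 12 KiB + 16 KiB pieces, the last one stretched to
         * orig_end to absorb the rounding remainder. */
        printf("sz_piece = %lu\n", sz_piece);
        return 0;
}
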
 97                                                    95 
 98 static unsigned long sz_range(struct damon_add     96 static unsigned long sz_range(struct damon_addr_range *r)
 99 {                                                  97 {
100         return r->end - r->start;                  98         return r->end - r->start;
101 }                                                  99 }
102                                                   100 
                                                   >> 101 static void swap_ranges(struct damon_addr_range *r1,
                                                   >> 102                         struct damon_addr_range *r2)
                                                   >> 103 {
                                                   >> 104         struct damon_addr_range tmp;
                                                   >> 105 
                                                   >> 106         tmp = *r1;
                                                   >> 107         *r1 = *r2;
                                                   >> 108         *r2 = tmp;
                                                   >> 109 }
                                                   >> 110 
103 /*                                                111 /*
104  * Find three regions separated by two biggest    112  * Find three regions separated by two biggest unmapped regions
105  *                                                113  *
106  * vma          the head vma of the target add    114  * vma          the head vma of the target address space
107  * regions      an array of three address rang    115  * regions      an array of three address ranges that results will be saved
108  *                                                116  *
109  * This function receives an address space and    117  * This function receives an address space and finds three regions in it which
110  * separated by the two biggest unmapped regio    118  * separated by the two biggest unmapped regions in the space.  Please refer to
111  * below comments of '__damon_va_init_regions(    119  * below comments of '__damon_va_init_regions()' function to know why this is
112  * necessary.                                     120  * necessary.
113  *                                                121  *
114  * Returns 0 if success, or negative error cod    122  * Returns 0 if success, or negative error code otherwise.
115  */                                               123  */
116 static int __damon_va_three_regions(struct mm_ !! 124 static int __damon_va_three_regions(struct vm_area_struct *vma,
117                                        struct     125                                        struct damon_addr_range regions[3])
118 {                                                 126 {
119         struct damon_addr_range first_gap = {0 !! 127         struct damon_addr_range gap = {0}, first_gap = {0}, second_gap = {0};
120         VMA_ITERATOR(vmi, mm, 0);              !! 128         struct vm_area_struct *last_vma = NULL;
121         struct vm_area_struct *vma, *prev = NU !! 129         unsigned long start = 0;
122         unsigned long start;                   !! 130         struct rb_root rbroot;
123                                                !! 131 
124         /*                                     !! 132         /* Find two biggest gaps so that first_gap > second_gap > others */
125          * Find the two biggest gaps so that f !! 133         for (; vma; vma = vma->vm_next) {
126          * If this is too slow, it can be opti !! 134                 if (!last_vma) {
127          * tree gaps.                          << 
128          */                                    << 
129         rcu_read_lock();                       << 
130         for_each_vma(vmi, vma) {               << 
131                 unsigned long gap;             << 
132                                                << 
133                 if (!prev) {                   << 
134                         start = vma->vm_start;    135                         start = vma->vm_start;
135                         goto next;                136                         goto next;
136                 }                                 137                 }
137                 gap = vma->vm_start - prev->vm << 
138                                                   138 
139                 if (gap > sz_range(&first_gap) !! 139                 if (vma->rb_subtree_gap <= sz_range(&second_gap)) {
140                         second_gap = first_gap !! 140                         rbroot.rb_node = &vma->vm_rb;
141                         first_gap.start = prev !! 141                         vma = rb_entry(rb_last(&rbroot),
142                         first_gap.end = vma->v !! 142                                         struct vm_area_struct, vm_rb);
143                 } else if (gap > sz_range(&sec !! 143                         goto next;
144                         second_gap.start = pre !! 144                 }
145                         second_gap.end = vma-> !! 145 
                                                   >> 146                 gap.start = last_vma->vm_end;
                                                   >> 147                 gap.end = vma->vm_start;
                                                   >> 148                 if (sz_range(&gap) > sz_range(&second_gap)) {
                                                   >> 149                         swap_ranges(&gap, &second_gap);
                                                   >> 150                         if (sz_range(&second_gap) > sz_range(&first_gap))
                                                   >> 151                                 swap_ranges(&second_gap, &first_gap);
146                 }                                 152                 }
147 next:                                             153 next:
148                 prev = vma;                    !! 154                 last_vma = vma;
149         }                                         155         }
150         rcu_read_unlock();                     << 
151                                                   156 
152         if (!sz_range(&second_gap) || !sz_rang    157         if (!sz_range(&second_gap) || !sz_range(&first_gap))
153                 return -EINVAL;                   158                 return -EINVAL;
154                                                   159 
155         /* Sort the two biggest gaps by addres    160         /* Sort the two biggest gaps by address */
156         if (first_gap.start > second_gap.start    161         if (first_gap.start > second_gap.start)
157                 swap(first_gap, second_gap);   !! 162                 swap_ranges(&first_gap, &second_gap);
158                                                   163 
159         /* Store the result */                    164         /* Store the result */
160         regions[0].start = ALIGN(start, DAMON_    165         regions[0].start = ALIGN(start, DAMON_MIN_REGION);
161         regions[0].end = ALIGN(first_gap.start    166         regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
162         regions[1].start = ALIGN(first_gap.end    167         regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
163         regions[1].end = ALIGN(second_gap.star    168         regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
164         regions[2].start = ALIGN(second_gap.en    169         regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
165         regions[2].end = ALIGN(prev->vm_end, D !! 170         regions[2].end = ALIGN(last_vma->vm_end, DAMON_MIN_REGION);
166                                                   171 
167         return 0;                                 172         return 0;
168 }                                                 173 }
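
The scan above (shown here for the linux-6.12-rc7 side) is a single pass that keeps the two largest gaps seen so far. A self-contained user-space sketch of the same idea, with hypothetical names and a fixed sorted array standing in for the VMA iterator:

#include <stdio.h>

struct range { unsigned long start, end; };

static unsigned long sz(const struct range *r) { return r->end - r->start; }

int main(void)
{
        /* Sorted, non-overlapping mappings: heap, two libraries, stack. */
        struct range maps[] = {
                { 0x1000,   0x3000   },
                { 0x100000, 0x120000 },
                { 0x121000, 0x130000 },
                { 0x7f0000, 0x7f8000 },
        };
        struct range first = { 0, 0 }, second = { 0, 0 };

        for (int i = 1; i < 4; i++) {
                struct range gap = { maps[i - 1].end, maps[i].start };

                if (sz(&gap) > sz(&first)) {
                        second = first; /* demote the old largest */
                        first = gap;
                } else if (sz(&gap) > sz(&second)) {
                        second = gap;
                }
        }
        /* first  = [0x130000, 0x7f0000) (stack-side gap),
         * second = [0x3000,   0x100000) (heap-side gap);
         * the kernel then sorts the two by address before use. */
        printf("first:  [%#lx, %#lx)\n", first.start, first.end);
        printf("second: [%#lx, %#lx)\n", second.start, second.end);
        return 0;
}
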
169                                                   174 
170 /*                                                175 /*
171  * Get the three regions in the given target (    176  * Get the three regions in the given target (task)
172  *                                                177  *
173  * Returns 0 on success, negative error code o    178  * Returns 0 on success, negative error code otherwise.
174  */                                               179  */
175 static int damon_va_three_regions(struct damon    180 static int damon_va_three_regions(struct damon_target *t,
176                                 struct damon_a    181                                 struct damon_addr_range regions[3])
177 {                                                 182 {
178         struct mm_struct *mm;                     183         struct mm_struct *mm;
179         int rc;                                   184         int rc;
180                                                   185 
181         mm = damon_get_mm(t);                     186         mm = damon_get_mm(t);
182         if (!mm)                                  187         if (!mm)
183                 return -EINVAL;                   188                 return -EINVAL;
184                                                   189 
185         mmap_read_lock(mm);                       190         mmap_read_lock(mm);
186         rc = __damon_va_three_regions(mm, regi !! 191         rc = __damon_va_three_regions(mm->mmap, regions);
187         mmap_read_unlock(mm);                     192         mmap_read_unlock(mm);
188                                                   193 
189         mmput(mm);                                194         mmput(mm);
190         return rc;                                195         return rc;
191 }                                                 196 }
192                                                   197 
193 /*                                                198 /*
194  * Initialize the monitoring target regions fo    199  * Initialize the monitoring target regions for the given target (task)
195  *                                                200  *
196  * t    the given target                          201  * t    the given target
197  *                                                202  *
198  * Because only a number of small portions of     203  * Because only a number of small portions of the entire address space
199  * is actually mapped to the memory and access    204  * is actually mapped to the memory and accessed, monitoring the unmapped
200  * regions is wasteful.  That said, because we    205  * regions is wasteful.  That said, because we can deal with small noises,
201  * tracking every mapping is not strictly requ    206  * tracking every mapping is not strictly required but could even incur a high
202  * overhead if the mapping frequently changes     207  * overhead if the mapping frequently changes or the number of mappings is
203  * high.  The adaptive regions adjustment mech    208  * high.  The adaptive regions adjustment mechanism will further help to deal
204  * with the noise by simply identifying the un    209  * with the noise by simply identifying the unmapped areas as a region that
205  * has no access.  Moreover, applying the real    210  * has no access.  Moreover, applying the real mappings that would have many
206  * unmapped areas inside will make the adaptiv    211  * unmapped areas inside will make the adaptive mechanism quite complex.  That
207  * said, too huge unmapped areas inside the mo    212  * said, too huge unmapped areas inside the monitoring target should be removed
208  * to not take the time for the adaptive mecha    213  * to not take the time for the adaptive mechanism.
209  *                                                214  *
210  * For the reason, we convert the complex mapp    215  * For the reason, we convert the complex mappings to three distinct regions
211  * that cover every mapped area of the address    216  * that cover every mapped area of the address space.  Also the two gaps
212  * between the three regions are the two bigge    217  * between the three regions are the two biggest unmapped areas in the given
213  * address space.  In detail, this function fi    218  * address space.  In detail, this function first identifies the start and the
214  * end of the mappings and the two biggest unm    219  * end of the mappings and the two biggest unmapped areas of the address space.
215  * Then, it constructs the three regions as be    220  * Then, it constructs the three regions as below:
216  *                                                221  *
217  *     [mappings[0]->start, big_two_unmapped_a    222  *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
218  *     [big_two_unmapped_areas[0]->end, big_tw    223  *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
219  *     [big_two_unmapped_areas[1]->end, mappin    224  *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
220  *                                                225  *
221  * As usual memory map of processes is as belo    226  * As usual memory map of processes is as below, the gap between the heap and
222  * the uppermost mmap()-ed region, and the gap    227  * the uppermost mmap()-ed region, and the gap between the lowermost mmap()-ed
223  * region and the stack will be two biggest un    228  * region and the stack will be two biggest unmapped regions.  Because these
224  * gaps are exceptionally huge areas in usual     229  * gaps are exceptionally huge areas in usual address space, excluding these
225  * two biggest unmapped regions will be suffic    230  * two biggest unmapped regions will be sufficient to make a trade-off.
226  *                                                231  *
227  *   <heap>                                       232  *   <heap>
228  *   <BIG UNMAPPED REGION 1>                      233  *   <BIG UNMAPPED REGION 1>
229  *   <uppermost mmap()-ed region>                 234  *   <uppermost mmap()-ed region>
230  *   (other mmap()-ed regions and small unmapp    235  *   (other mmap()-ed regions and small unmapped regions)
231  *   <lowermost mmap()-ed region>                 236  *   <lowermost mmap()-ed region>
232  *   <BIG UNMAPPED REGION 2>                      237  *   <BIG UNMAPPED REGION 2>
233  *   <stack>                                      238  *   <stack>
234  */                                               239  */
235 static void __damon_va_init_regions(struct dam    240 static void __damon_va_init_regions(struct damon_ctx *ctx,
236                                      struct da    241                                      struct damon_target *t)
237 {                                                 242 {
238         struct damon_target *ti;               << 
239         struct damon_region *r;                   243         struct damon_region *r;
240         struct damon_addr_range regions[3];       244         struct damon_addr_range regions[3];
241         unsigned long sz = 0, nr_pieces;          245         unsigned long sz = 0, nr_pieces;
242         int i, tidx = 0;                       !! 246         int i;
243                                                   247 
244         if (damon_va_three_regions(t, regions)    248         if (damon_va_three_regions(t, regions)) {
245                 damon_for_each_target(ti, ctx) !! 249                 pr_err("Failed to get three regions of target %lu\n", t->id);
246                         if (ti == t)           << 
247                                 break;         << 
248                         tidx++;                << 
249                 }                              << 
250                 pr_debug("Failed to get three  << 
251                 return;                           250                 return;
252         }                                         251         }
253                                                   252 
254         for (i = 0; i < 3; i++)                   253         for (i = 0; i < 3; i++)
255                 sz += regions[i].end - regions    254                 sz += regions[i].end - regions[i].start;
256         if (ctx->attrs.min_nr_regions)         !! 255         if (ctx->min_nr_regions)
257                 sz /= ctx->attrs.min_nr_region !! 256                 sz /= ctx->min_nr_regions;
258         if (sz < DAMON_MIN_REGION)                257         if (sz < DAMON_MIN_REGION)
259                 sz = DAMON_MIN_REGION;            258                 sz = DAMON_MIN_REGION;
260                                                   259 
261         /* Set the initial three regions of th    260         /* Set the initial three regions of the target */
262         for (i = 0; i < 3; i++) {                 261         for (i = 0; i < 3; i++) {
263                 r = damon_new_region(regions[i    262                 r = damon_new_region(regions[i].start, regions[i].end);
264                 if (!r) {                         263                 if (!r) {
265                         pr_err("%d'th init reg    264                         pr_err("%d'th init region creation failed\n", i);
266                         return;                   265                         return;
267                 }                                 266                 }
268                 damon_add_region(r, t);           267                 damon_add_region(r, t);
269                                                   268 
270                 nr_pieces = (regions[i].end -     269                 nr_pieces = (regions[i].end - regions[i].start) / sz;
271                 damon_va_evenly_split_region(t    270                 damon_va_evenly_split_region(t, r, nr_pieces);
272         }                                         271         }
273 }                                                 272 }
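
As a concrete (purely illustrative) sizing run: if the three regions cover 300 MiB in total and min_nr_regions is 10, sz becomes 30 MiB, so a 120 MiB region gets split into four even pieces while any region smaller than 30 MiB is left whole, since damon_va_evenly_split_region() rejects nr_pieces == 0.
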
274                                                   273 
275 /* Initialize '->regions_list' of every target    274 /* Initialize '->regions_list' of every target (task) */
276 static void damon_va_init(struct damon_ctx *ct !! 275 void damon_va_init(struct damon_ctx *ctx)
277 {                                                 276 {
278         struct damon_target *t;                   277         struct damon_target *t;
279                                                   278 
280         damon_for_each_target(t, ctx) {           279         damon_for_each_target(t, ctx) {
281                 /* the user may set the target    280                 /* the user may set the target regions as they want */
282                 if (!damon_nr_regions(t))         281                 if (!damon_nr_regions(t))
283                         __damon_va_init_region    282                         __damon_va_init_regions(ctx, t);
284         }                                         283         }
285 }                                                 284 }
286                                                   285 
287 /*                                                286 /*
                                                   >> 287  * Functions for the dynamic monitoring target regions update
                                                   >> 288  */
                                                   >> 289 
                                                   >> 290 /*
                                                   >> 291  * Check whether a region is intersecting an address range
                                                   >> 292  *
                                                   >> 293  * Returns true if it is.
                                                   >> 294  */
                                                   >> 295 static bool damon_intersect(struct damon_region *r, struct damon_addr_range *re)
                                                   >> 296 {
                                                   >> 297         return !(r->ar.end <= re->start || re->end <= r->ar.start);
                                                   >> 298 }
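
Note the half-open [start, end) convention here: ranges that merely touch, such as [0x1000, 0x3000) and [0x3000, 0x5000), do not intersect, because r->ar.end <= re->start already holds.
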
                                                   >> 299 
                                                   >> 300 /*
                                                   >> 301  * Update damon regions for the three big regions of the given target
                                                   >> 302  *
                                                   >> 303  * t            the given target
                                                   >> 304  * bregions     the three big regions of the target
                                                   >> 305  */
                                                   >> 306 static void damon_va_apply_three_regions(struct damon_target *t,
                                                   >> 307                 struct damon_addr_range bregions[3])
                                                   >> 308 {
                                                   >> 309         struct damon_region *r, *next;
                                                   >> 310         unsigned int i;
                                                   >> 311 
                                                   >> 312         /* Remove regions which are not in the three big regions now */
                                                   >> 313         damon_for_each_region_safe(r, next, t) {
                                                   >> 314                 for (i = 0; i < 3; i++) {
                                                   >> 315                         if (damon_intersect(r, &bregions[i]))
                                                   >> 316                                 break;
                                                   >> 317                 }
                                                   >> 318                 if (i == 3)
                                                   >> 319                         damon_destroy_region(r, t);
                                                   >> 320         }
                                                   >> 321 
                                                   >> 322         /* Adjust intersecting regions to fit with the three big regions */
                                                   >> 323         for (i = 0; i < 3; i++) {
                                                   >> 324                 struct damon_region *first = NULL, *last;
                                                   >> 325                 struct damon_region *newr;
                                                   >> 326                 struct damon_addr_range *br;
                                                   >> 327 
                                                   >> 328                 br = &bregions[i];
                                                   >> 329                 /* Get the first and last regions which intersects with br */
                                                   >> 330                 damon_for_each_region(r, t) {
                                                   >> 331                         if (damon_intersect(r, br)) {
                                                   >> 332                                 if (!first)
                                                   >> 333                                         first = r;
                                                   >> 334                                 last = r;
                                                   >> 335                         }
                                                   >> 336                         if (r->ar.start >= br->end)
                                                   >> 337                                 break;
                                                   >> 338                 }
                                                   >> 339                 if (!first) {
                                                   >> 340                         /* no damon_region intersects with this big region */
                                                   >> 341                         newr = damon_new_region(
                                                   >> 342                                         ALIGN_DOWN(br->start,
                                                   >> 343                                                 DAMON_MIN_REGION),
                                                   >> 344                                         ALIGN(br->end, DAMON_MIN_REGION));
                                                   >> 345                         if (!newr)
                                                   >> 346                                 continue;
                                                   >> 347                         damon_insert_region(newr, damon_prev_region(r), r, t);
                                                   >> 348                 } else {
                                                   >> 349                         first->ar.start = ALIGN_DOWN(br->start,
                                                   >> 350                                         DAMON_MIN_REGION);
                                                   >> 351                         last->ar.end = ALIGN(br->end, DAMON_MIN_REGION);
                                                   >> 352                 }
                                                   >> 353         }
                                                   >> 354 }
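
As an illustrative run of this older helper: given a big region [0x2000, 0x6000), an existing damon_region [0x0, 0x1000) intersects none of the three big regions and gets destroyed, while an intersecting one such as [0x1800, 0x3000) survives with its start pulled up to ALIGN_DOWN(0x2000, DAMON_MIN_REGION). On the linux-6.12-rc7 side this helper is gone entirely, replaced by the generic damon_set_regions() call visible below.
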
                                                   >> 355 
                                                   >> 356 /*
288  * Update regions for current memory mappings     357  * Update regions for current memory mappings
289  */                                               358  */
290 static void damon_va_update(struct damon_ctx * !! 359 void damon_va_update(struct damon_ctx *ctx)
291 {                                                 360 {
292         struct damon_addr_range three_regions[    361         struct damon_addr_range three_regions[3];
293         struct damon_target *t;                   362         struct damon_target *t;
294                                                   363 
295         damon_for_each_target(t, ctx) {           364         damon_for_each_target(t, ctx) {
296                 if (damon_va_three_regions(t,     365                 if (damon_va_three_regions(t, three_regions))
297                         continue;                 366                         continue;
298                 damon_set_regions(t, three_reg !! 367                 damon_va_apply_three_regions(t, three_regions);
299         }                                         368         }
300 }                                                 369 }
301                                                   370 
302 static int damon_mkold_pmd_entry(pmd_t *pmd, u    371 static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
303                 unsigned long next, struct mm_    372                 unsigned long next, struct mm_walk *walk)
304 {                                                 373 {
305         pte_t *pte;                               374         pte_t *pte;
306         pmd_t pmde;                            << 
307         spinlock_t *ptl;                          375         spinlock_t *ptl;
308                                                   376 
309         if (pmd_trans_huge(pmdp_get(pmd))) {   !! 377         if (pmd_huge(*pmd)) {
310                 ptl = pmd_lock(walk->mm, pmd);    378                 ptl = pmd_lock(walk->mm, pmd);
311                 pmde = pmdp_get(pmd);          !! 379                 if (pmd_huge(*pmd)) {
312                                                !! 380                         damon_pmdp_mkold(pmd, walk->mm, addr);
313                 if (!pmd_present(pmde)) {      << 
314                         spin_unlock(ptl);      << 
315                         return 0;              << 
316                 }                              << 
317                                                << 
318                 if (pmd_trans_huge(pmde)) {    << 
319                         damon_pmdp_mkold(pmd,  << 
320                         spin_unlock(ptl);         381                         spin_unlock(ptl);
321                         return 0;                 382                         return 0;
322                 }                                 383                 }
323                 spin_unlock(ptl);                 384                 spin_unlock(ptl);
324         }                                         385         }
325                                                   386 
326         pte = pte_offset_map_lock(walk->mm, pm !! 387         if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
327         if (!pte) {                            << 
328                 walk->action = ACTION_AGAIN;   << 
329                 return 0;                         388                 return 0;
330         }                                      !! 389         pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
331         if (!pte_present(ptep_get(pte)))       !! 390         if (!pte_present(*pte))
332                 goto out;                         391                 goto out;
333         damon_ptep_mkold(pte, walk->vma, addr) !! 392         damon_ptep_mkold(pte, walk->mm, addr);
334 out:                                              393 out:
335         pte_unmap_unlock(pte, ptl);               394         pte_unmap_unlock(pte, ptl);
336         return 0;                                 395         return 0;
337 }                                                 396 }
338                                                   397 
339 #ifdef CONFIG_HUGETLB_PAGE                     << 
340 static void damon_hugetlb_mkold(pte_t *pte, st << 
341                                 struct vm_area << 
342 {                                              << 
343         bool referenced = false;               << 
344         pte_t entry = huge_ptep_get(mm, addr,  << 
345         struct folio *folio = pfn_folio(pte_pf << 
346         unsigned long psize = huge_page_size(h << 
347                                                << 
348         folio_get(folio);                      << 
349                                                << 
350         if (pte_young(entry)) {                << 
351                 referenced = true;             << 
352                 entry = pte_mkold(entry);      << 
353                 set_huge_pte_at(mm, addr, pte, << 
354         }                                      << 
355                                                << 
356 #ifdef CONFIG_MMU_NOTIFIER                     << 
357         if (mmu_notifier_clear_young(mm, addr, << 
358                                      addr + hu << 
359                 referenced = true;             << 
360 #endif /* CONFIG_MMU_NOTIFIER */               << 
361                                                << 
362         if (referenced)                        << 
363                 folio_set_young(folio);        << 
364                                                << 
365         folio_set_idle(folio);                 << 
366         folio_put(folio);                      << 
367 }                                              << 
368                                                << 
369 static int damon_mkold_hugetlb_entry(pte_t *pt << 
370                                      unsigned  << 
371                                      struct mm << 
372 {                                              << 
373         struct hstate *h = hstate_vma(walk->vm << 
374         spinlock_t *ptl;                       << 
375         pte_t entry;                           << 
376                                                << 
377         ptl = huge_pte_lock(h, walk->mm, pte); << 
378         entry = huge_ptep_get(walk->mm, addr,  << 
379         if (!pte_present(entry))               << 
380                 goto out;                      << 
381                                                << 
382         damon_hugetlb_mkold(pte, walk->mm, wal << 
383                                                << 
384 out:                                           << 
385         spin_unlock(ptl);                      << 
386         return 0;                              << 
387 }                                              << 
388 #else                                          << 
389 #define damon_mkold_hugetlb_entry NULL         << 
390 #endif /* CONFIG_HUGETLB_PAGE */               << 
391                                                << 
392 static const struct mm_walk_ops damon_mkold_op    398 static const struct mm_walk_ops damon_mkold_ops = {
393         .pmd_entry = damon_mkold_pmd_entry,       399         .pmd_entry = damon_mkold_pmd_entry,
394         .hugetlb_entry = damon_mkold_hugetlb_e << 
395         .walk_lock = PGWALK_RDLOCK,            << 
396 };                                                400 };
397                                                   401 
398 static void damon_va_mkold(struct mm_struct *m    402 static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
399 {                                                 403 {
400         mmap_read_lock(mm);                       404         mmap_read_lock(mm);
401         walk_page_range(mm, addr, addr + 1, &d    405         walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
402         mmap_read_unlock(mm);                     406         mmap_read_unlock(mm);
403 }                                                 407 }
404                                                   408 
405 /*                                                409 /*
406  * Functions for the access checking of the re    410  * Functions for the access checking of the regions
407  */                                               411  */
408                                                   412 
409 static void __damon_va_prepare_access_check(st !! 413 static void damon_va_prepare_access_check(struct damon_ctx *ctx,
410                                         struct !! 414                         struct mm_struct *mm, struct damon_region *r)
411 {                                                 415 {
412         r->sampling_addr = damon_rand(r->ar.st    416         r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
413                                                   417 
414         damon_va_mkold(mm, r->sampling_addr);     418         damon_va_mkold(mm, r->sampling_addr);
415 }                                                 419 }
416                                                   420 
417 static void damon_va_prepare_access_checks(str !! 421 void damon_va_prepare_access_checks(struct damon_ctx *ctx)
418 {                                                 422 {
419         struct damon_target *t;                   423         struct damon_target *t;
420         struct mm_struct *mm;                     424         struct mm_struct *mm;
421         struct damon_region *r;                   425         struct damon_region *r;
422                                                   426 
423         damon_for_each_target(t, ctx) {           427         damon_for_each_target(t, ctx) {
424                 mm = damon_get_mm(t);             428                 mm = damon_get_mm(t);
425                 if (!mm)                          429                 if (!mm)
426                         continue;                 430                         continue;
427                 damon_for_each_region(r, t)       431                 damon_for_each_region(r, t)
428                         __damon_va_prepare_acc !! 432                         damon_va_prepare_access_check(ctx, mm, r);
429                 mmput(mm);                        433                 mmput(mm);
430         }                                         434         }
431 }                                                 435 }
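
The mm-walk helpers above implement the "prepare" half of DAMON's sampling: pick one random address per region and clear its accessed ("young") state. The "check" half below re-walks the same address and tests whether the bit came back. A condensed, self-contained sketch of one such round (all names here are hypothetical stand-ins, not kernel API):

#include <stdbool.h>
#include <stdlib.h>

struct region {
        unsigned long start, end, sampling_addr;
        unsigned int nr_accesses;
};

/* Stand-ins for damon_va_mkold()/damon_va_young(); a real implementation
 * clears and tests the accessed bit in the page table entry. */
static void mkold(unsigned long addr) { (void)addr; }
static bool young(unsigned long addr) { (void)addr; return rand() & 1; }

/* One sampling round for a single region. */
static void sample_region_once(struct region *r)
{
        r->sampling_addr = r->start + rand() % (r->end - r->start);
        mkold(r->sampling_addr);        /* prepare: clear the young bit */
        /* ... one sampling interval elapses ... */
        if (young(r->sampling_addr))    /* check: was it set again? */
                r->nr_accesses++;
}
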
432                                                   436 
433 struct damon_young_walk_private {                 437 struct damon_young_walk_private {
434         /* size of the folio for the access ch !! 438         unsigned long *page_sz;
435         unsigned long *folio_sz;               << 
436         bool young;                               439         bool young;
437 };                                                440 };
438                                                   441 
439 static int damon_young_pmd_entry(pmd_t *pmd, u    442 static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
440                 unsigned long next, struct mm_    443                 unsigned long next, struct mm_walk *walk)
441 {                                                 444 {
442         pte_t *pte;                               445         pte_t *pte;
443         pte_t ptent;                           << 
444         spinlock_t *ptl;                          446         spinlock_t *ptl;
445         struct folio *folio;                   !! 447         struct page *page;
446         struct damon_young_walk_private *priv     448         struct damon_young_walk_private *priv = walk->private;
447                                                   449 
448 #ifdef CONFIG_TRANSPARENT_HUGEPAGE                450 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
449         if (pmd_trans_huge(pmdp_get(pmd))) {   !! 451         if (pmd_huge(*pmd)) {
450                 pmd_t pmde;                    << 
451                                                << 
452                 ptl = pmd_lock(walk->mm, pmd);    452                 ptl = pmd_lock(walk->mm, pmd);
453                 pmde = pmdp_get(pmd);          !! 453                 if (!pmd_huge(*pmd)) {
454                                                << 
455                 if (!pmd_present(pmde)) {      << 
456                         spin_unlock(ptl);      << 
457                         return 0;              << 
458                 }                              << 
459                                                << 
460                 if (!pmd_trans_huge(pmde)) {   << 
461                         spin_unlock(ptl);         454                         spin_unlock(ptl);
462                         goto regular_page;        455                         goto regular_page;
463                 }                                 456                 }
464                 folio = damon_get_folio(pmd_pf !! 457                 page = damon_get_page(pmd_pfn(*pmd));
465                 if (!folio)                    !! 458                 if (!page)
466                         goto huge_out;            459                         goto huge_out;
467                 if (pmd_young(pmde) || !folio_ !! 460                 if (pmd_young(*pmd) || !page_is_idle(page) ||
468                                         mmu_no    461                                         mmu_notifier_test_young(walk->mm,
469                                                !! 462                                                 addr)) {
                                                   >> 463                         *priv->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
470                         priv->young = true;       464                         priv->young = true;
471                 *priv->folio_sz = HPAGE_PMD_SI !! 465                 }
472                 folio_put(folio);              !! 466                 put_page(page);
473 huge_out:                                         467 huge_out:
474                 spin_unlock(ptl);                 468                 spin_unlock(ptl);
475                 return 0;                         469                 return 0;
476         }                                         470         }
477                                                   471 
478 regular_page:                                     472 regular_page:
479 #endif  /* CONFIG_TRANSPARENT_HUGEPAGE */         473 #endif  /* CONFIG_TRANSPARENT_HUGEPAGE */
480                                                   474 
                                                   >> 475         if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
                                                   >> 476                 return -EINVAL;
481         pte = pte_offset_map_lock(walk->mm, pm    477         pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
482         if (!pte) {                            !! 478         if (!pte_present(*pte))
483                 walk->action = ACTION_AGAIN;   << 
484                 return 0;                      << 
485         }                                      << 
486         ptent = ptep_get(pte);                 << 
487         if (!pte_present(ptent))               << 
488                 goto out;                         479                 goto out;
489         folio = damon_get_folio(pte_pfn(ptent) !! 480         page = damon_get_page(pte_pfn(*pte));
490         if (!folio)                            !! 481         if (!page)
491                 goto out;                         482                 goto out;
492         if (pte_young(ptent) || !folio_test_id !! 483         if (pte_young(*pte) || !page_is_idle(page) ||
493                         mmu_notifier_test_youn !! 484                         mmu_notifier_test_young(walk->mm, addr)) {
                                                   >> 485                 *priv->page_sz = PAGE_SIZE;
494                 priv->young = true;               486                 priv->young = true;
495         *priv->folio_sz = folio_size(folio);   !! 487         }
496         folio_put(folio);                      !! 488         put_page(page);
497 out:                                              489 out:
498         pte_unmap_unlock(pte, ptl);               490         pte_unmap_unlock(pte, ptl);
499         return 0;                                 491         return 0;
500 }                                                 492 }
501                                                   493 
502 #ifdef CONFIG_HUGETLB_PAGE                     << 
503 static int damon_young_hugetlb_entry(pte_t *pt << 
504                                      unsigned  << 
505                                      struct mm << 
506 {                                              << 
507         struct damon_young_walk_private *priv  << 
508         struct hstate *h = hstate_vma(walk->vm << 
509         struct folio *folio;                   << 
510         spinlock_t *ptl;                       << 
511         pte_t entry;                           << 
512                                                << 
513         ptl = huge_pte_lock(h, walk->mm, pte); << 
514         entry = huge_ptep_get(walk->mm, addr,  << 
515         if (!pte_present(entry))               << 
516                 goto out;                      << 
517                                                << 
518         folio = pfn_folio(pte_pfn(entry));     << 
519         folio_get(folio);                      << 
520                                                << 
521         if (pte_young(entry) || !folio_test_id << 
522             mmu_notifier_test_young(walk->mm,  << 
523                 priv->young = true;            << 
524         *priv->folio_sz = huge_page_size(h);   << 
525                                                << 
526         folio_put(folio);                      << 
527                                                << 
528 out:                                           << 
529         spin_unlock(ptl);                      << 
530         return 0;                              << 
531 }                                              << 
532 #else                                          << 
533 #define damon_young_hugetlb_entry NULL         << 
534 #endif /* CONFIG_HUGETLB_PAGE */               << 
535                                                << 
536 static const struct mm_walk_ops damon_young_op    494 static const struct mm_walk_ops damon_young_ops = {
537         .pmd_entry = damon_young_pmd_entry,       495         .pmd_entry = damon_young_pmd_entry,
538         .hugetlb_entry = damon_young_hugetlb_e << 
539         .walk_lock = PGWALK_RDLOCK,            << 
540 };                                                496 };
541                                                   497 
542 static bool damon_va_young(struct mm_struct *m    498 static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
543                 unsigned long *folio_sz)       !! 499                 unsigned long *page_sz)
544 {                                                 500 {
545         struct damon_young_walk_private arg =     501         struct damon_young_walk_private arg = {
546                 .folio_sz = folio_sz,          !! 502                 .page_sz = page_sz,
547                 .young = false,                   503                 .young = false,
548         };                                        504         };
549                                                   505 
550         mmap_read_lock(mm);                       506         mmap_read_lock(mm);
551         walk_page_range(mm, addr, addr + 1, &d    507         walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
552         mmap_read_unlock(mm);                     508         mmap_read_unlock(mm);
553         return arg.young;                         509         return arg.young;
554 }                                                 510 }
555                                                   511 
556 /*                                                512 /*
557  * Check whether the region was accessed after    513  * Check whether the region was accessed after the last preparation
558  *                                                514  *
559  * mm   'mm_struct' for the given virtual addr    515  * mm   'mm_struct' for the given virtual address space
560  * r    the region to be checked                  516  * r    the region to be checked
561  */                                               517  */
562 static void __damon_va_check_access(struct mm_ !! 518 static void damon_va_check_access(struct damon_ctx *ctx,
563                                 struct damon_r !! 519                                struct mm_struct *mm, struct damon_region *r)
564                                 struct damon_a << 
565 {                                                 520 {
                                                   >> 521         static struct mm_struct *last_mm;
566         static unsigned long last_addr;           522         static unsigned long last_addr;
567         static unsigned long last_folio_sz = P !! 523         static unsigned long last_page_sz = PAGE_SIZE;
568         static bool last_accessed;                524         static bool last_accessed;
569                                                   525 
570         if (!mm) {                             << 
571                 damon_update_region_access_rat << 
572                 return;                        << 
573         }                                      << 
574                                                << 
575         /* If the region is in the last checke    526         /* If the region is in the last checked page, reuse the result */
576         if (same_target && (ALIGN_DOWN(last_ad !! 527         if (mm == last_mm && (ALIGN_DOWN(last_addr, last_page_sz) ==
577                                 ALIGN_DOWN(r-> !! 528                                 ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
578                 damon_update_region_access_rat !! 529                 if (last_accessed)
                                                   >> 530                         r->nr_accesses++;
579                 return;                           531                 return;
580         }                                         532         }
581                                                   533 
582         last_accessed = damon_va_young(mm, r-> !! 534         last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
583         damon_update_region_access_rate(r, las !! 535         if (last_accessed)
                                                   >> 536                 r->nr_accesses++;
584                                                   537 
                                                   >> 538         last_mm = mm;
585         last_addr = r->sampling_addr;             539         last_addr = r->sampling_addr;
586 }                                                 540 }
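
The static variables above form a single-slot cache: when consecutive regions of the same target have sampling addresses inside the same (possibly huge) page, the result of the last page-table walk is reused instead of walking again. A self-contained sketch of the same idea, with a hypothetical is_page_young() standing in for damon_va_young():

#include <stdbool.h>

#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))

/* Hypothetical probe; plays the role of damon_va_young(). */
bool is_page_young(unsigned long addr, unsigned long *page_sz);

bool cached_is_young(unsigned long addr, bool same_target)
{
	static unsigned long last_addr;
	static unsigned long last_sz = 4096;
	static bool last_young;

	/* Same target and same (huge) page: reuse the last answer. */
	if (same_target &&
	    ALIGN_DOWN(last_addr, last_sz) == ALIGN_DOWN(addr, last_sz))
		return last_young;

	last_young = is_page_young(addr, &last_sz);
	last_addr = addr;
	return last_young;
}

Note the change from the older right-hand side: the 6.12 code keys the cache on a same_target flag passed by the caller instead of comparing against a saved last_mm pointer.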
587                                                   541 
588 static unsigned int damon_va_check_accesses(st !! 542 unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
589 {                                                 543 {
590         struct damon_target *t;                   544         struct damon_target *t;
591         struct mm_struct *mm;                     545         struct mm_struct *mm;
592         struct damon_region *r;                   546         struct damon_region *r;
593         unsigned int max_nr_accesses = 0;         547         unsigned int max_nr_accesses = 0;
594         bool same_target;                      << 
595                                                   548 
596         damon_for_each_target(t, ctx) {           549         damon_for_each_target(t, ctx) {
597                 mm = damon_get_mm(t);             550                 mm = damon_get_mm(t);
598                 same_target = false;           !! 551                 if (!mm)
                                                   >> 552                         continue;
599                 damon_for_each_region(r, t) {     553                 damon_for_each_region(r, t) {
600                         __damon_va_check_acces !! 554                         damon_va_check_access(ctx, mm, r);
601                                         &ctx-> << 
602                         max_nr_accesses = max(    555                         max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
603                         same_target = true;    << 
604                 }                                 556                 }
605                 if (mm)                        !! 557                 mmput(mm);
606                         mmput(mm);             << 
607         }                                         558         }
608                                                   559 
609         return max_nr_accesses;                   560         return max_nr_accesses;
610 }                                                 561 }
611                                                   562 
612 /*                                                563 /*
613  * Functions for the target validity check and    564  * Functions for the target validity check and cleanup
614  */                                               565  */
615                                                   566 
616 static bool damon_va_target_valid(struct damon !! 567 bool damon_va_target_valid(void *target)
617 {                                                 568 {
                                                   >> 569         struct damon_target *t = target;
618         struct task_struct *task;                 570         struct task_struct *task;
619                                                   571 
620         task = damon_get_task_struct(t);          572         task = damon_get_task_struct(t);
621         if (task) {                               573         if (task) {
622                 put_task_struct(task);            574                 put_task_struct(task);
623                 return true;                      575                 return true;
624         }                                         576         }
625                                                   577 
626         return false;                             578         return false;
627 }                                                 579 }
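
The check is purely a liveness test on the target's pid reference. A loose userspace analogue (illustration only, not DAMON code) is probing a pid with signal 0:

#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <sys/types.h>

/* Returns true if 'pid' still names a live process. */
static bool pid_is_alive(pid_t pid)
{
	if (kill(pid, 0) == 0)
		return true;
	/* EPERM: the process exists, we just may not signal it. */
	return errno == EPERM;
}

DAMON uses the in-kernel equivalent so a monitoring target is dropped automatically once its process exits.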
628                                                   580 
629 #ifndef CONFIG_ADVISE_SYSCALLS                    581 #ifndef CONFIG_ADVISE_SYSCALLS
630 static unsigned long damos_madvise(struct damo !! 582 static int damos_madvise(struct damon_target *target, struct damon_region *r,
631                 struct damon_region *r, int be !! 583                         int behavior)
632 {                                                 584 {
633         return 0;                              !! 585         return -EINVAL;
634 }                                                 586 }
635 #else                                             587 #else
636 static unsigned long damos_madvise(struct damo !! 588 static int damos_madvise(struct damon_target *target, struct damon_region *r,
637                 struct damon_region *r, int be !! 589                         int behavior)
638 {                                                 590 {
639         struct mm_struct *mm;                     591         struct mm_struct *mm;
640         unsigned long start = PAGE_ALIGN(r->ar !! 592         int ret = -ENOMEM;
641         unsigned long len = PAGE_ALIGN(damon_s << 
642         unsigned long applied;                 << 
643                                                   593 
644         mm = damon_get_mm(target);                594         mm = damon_get_mm(target);
645         if (!mm)                                  595         if (!mm)
646                 return 0;                      !! 596                 goto out;
647                                                   597 
648         applied = do_madvise(mm, start, len, b !! 598         ret = do_madvise(mm, PAGE_ALIGN(r->ar.start),
                                                   >> 599                         PAGE_ALIGN(r->ar.end - r->ar.start), behavior);
649         mmput(mm);                                600         mmput(mm);
650                                                !! 601 out:
651         return applied;                        !! 602         return ret;
652 }                                                 603 }
653 #endif  /* CONFIG_ADVISE_SYSCALLS */              604 #endif  /* CONFIG_ADVISE_SYSCALLS */
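
In 6.12, damos_madvise() returns the number of bytes the hint was applied to (zero when madvise support is compiled out), whereas the 5.16 version returned an errno-style int. The userspace counterpart of this path is the madvise(2) syscall; a minimal sketch that pages out a private mapping (MADV_PAGEOUT needs Linux 5.4+):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_PAGEOUT
#define MADV_PAGEOUT 21		/* from <linux/mman.h>, for older libc headers */
#endif

int main(void)
{
	size_t len = 1 << 20;	/* 1 MiB */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(buf, 0xa5, len);		/* fault the pages in */

	/* Hint that the range is cold; the kernel may reclaim it. */
	if (madvise(buf, len, MADV_PAGEOUT))
		perror("madvise(MADV_PAGEOUT)");

	munmap(buf, len);
	return 0;
}

do_madvise() lets DAMON apply the same hints to another process's address space, which plain madvise(2) cannot do (process_madvise(2) is the userspace route for that).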
654                                                   605 
655 static unsigned long damon_va_apply_scheme(str !! 606 int damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t,
656                 struct damon_target *t, struct !! 607                 struct damon_region *r, struct damos *scheme)
657                 struct damos *scheme)          << 
658 {                                                 608 {
659         int madv_action;                          609         int madv_action;
660                                                   610 
661         switch (scheme->action) {                 611         switch (scheme->action) {
662         case DAMOS_WILLNEED:                      612         case DAMOS_WILLNEED:
663                 madv_action = MADV_WILLNEED;      613                 madv_action = MADV_WILLNEED;
664                 break;                            614                 break;
665         case DAMOS_COLD:                          615         case DAMOS_COLD:
666                 madv_action = MADV_COLD;          616                 madv_action = MADV_COLD;
667                 break;                            617                 break;
668         case DAMOS_PAGEOUT:                       618         case DAMOS_PAGEOUT:
669                 madv_action = MADV_PAGEOUT;       619                 madv_action = MADV_PAGEOUT;
670                 break;                            620                 break;
671         case DAMOS_HUGEPAGE:                      621         case DAMOS_HUGEPAGE:
672                 madv_action = MADV_HUGEPAGE;      622                 madv_action = MADV_HUGEPAGE;
673                 break;                            623                 break;
674         case DAMOS_NOHUGEPAGE:                    624         case DAMOS_NOHUGEPAGE:
675                 madv_action = MADV_NOHUGEPAGE;    625                 madv_action = MADV_NOHUGEPAGE;
676                 break;                            626                 break;
677         case DAMOS_STAT:                          627         case DAMOS_STAT:
678                 return 0;                         628                 return 0;
679         default:                                  629         default:
680                 /*                             !! 630                 return -EINVAL;
681                  * DAMOS actions that are not  << 
682                  */                            << 
683                 return 0;                      << 
684         }                                         631         }
685                                                   632 
686         return damos_madvise(t, r, madv_action    633         return damos_madvise(t, r, madv_action);
687 }                                                 634 }
688                                                   635 
689 static int damon_va_scheme_score(struct damon_ !! 636 int damon_va_scheme_score(struct damon_ctx *context, struct damon_target *t,
690                 struct damon_target *t, struct !! 637                 struct damon_region *r, struct damos *scheme)
691                 struct damos *scheme)          << 
692 {                                                 638 {
693                                                   639 
694         switch (scheme->action) {                 640         switch (scheme->action) {
695         case DAMOS_PAGEOUT:                       641         case DAMOS_PAGEOUT:
696                 return damon_cold_score(contex !! 642                 return damon_pageout_score(context, r, scheme);
697         default:                                  643         default:
698                 break;                            644                 break;
699         }                                         645         }
700                                                   646 
701         return DAMOS_MAX_SCORE;                   647         return DAMOS_MAX_SCORE;
702 }                                                 648 }
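
For DAMOS_PAGEOUT the score comes from damon_cold_score() (damon_pageout_score() in 5.16), which rates a region by how cold it is so that under quota pressure the coldest regions are reclaimed first; every other action gets DAMOS_MAX_SCORE, i.e. no prioritization. The real helper weighs access frequency and age with the scheme's quota weights; as a rough, hypothetical illustration only:

#define DAMOS_MAX_SCORE	100	/* same value the kernel uses */

/*
 * Invented coldness metric, not the kernel's formula: a region scores
 * higher the less it was accessed and the longer it stayed that way.
 */
static int cold_score(unsigned int nr_accesses, unsigned int max_nr_accesses,
		      unsigned int age, unsigned int max_age)
{
	unsigned int hotness = max_nr_accesses ?
			nr_accesses * DAMOS_MAX_SCORE / max_nr_accesses : 0;
	unsigned int oldness = max_age ?
			age * DAMOS_MAX_SCORE / max_age : 0;

	return (DAMOS_MAX_SCORE - hotness + oldness) / 2;
}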
703                                                   649 
704 static int __init damon_va_initcall(void)      !! 650 void damon_va_set_primitives(struct damon_ctx *ctx)
705 {                                                 651 {
706         struct damon_operations ops = {        !! 652         ctx->primitive.init = damon_va_init;
707                 .id = DAMON_OPS_VADDR,         !! 653         ctx->primitive.update = damon_va_update;
708                 .init = damon_va_init,         !! 654         ctx->primitive.prepare_access_checks = damon_va_prepare_access_checks;
709                 .update = damon_va_update,     !! 655         ctx->primitive.check_accesses = damon_va_check_accesses;
710                 .prepare_access_checks = damon !! 656         ctx->primitive.reset_aggregated = NULL;
711                 .check_accesses = damon_va_che !! 657         ctx->primitive.target_valid = damon_va_target_valid;
712                 .reset_aggregated = NULL,      !! 658         ctx->primitive.cleanup = NULL;
713                 .target_valid = damon_va_targe !! 659         ctx->primitive.apply_scheme = damon_va_apply_scheme;
714                 .cleanup = NULL,               !! 660         ctx->primitive.get_scheme_score = damon_va_scheme_score;
715                 .apply_scheme = damon_va_apply !! 661 }
716                 .get_scheme_score = damon_va_s << 
717         };                                     << 
718         /* ops for fixed virtual address range << 
719         struct damon_operations ops_fvaddr = o << 
720         int err;                               << 
721                                                << 
722         /* Don't set the monitoring target reg << 
723         ops_fvaddr.id = DAMON_OPS_FVADDR;      << 
724         ops_fvaddr.init = NULL;                << 
725         ops_fvaddr.update = NULL;              << 
726                                                << 
727         err = damon_register_ops(&ops);        << 
728         if (err)                               << 
729                 return err;                    << 
730         return damon_register_ops(&ops_fvaddr) << 
731 };                                             << 
732                                                << 
733 subsys_initcall(damon_va_initcall);            << 
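
The registration builds two operations sets from one table: ops_fvaddr starts as a plain struct copy of ops, then overrides the id and clears init/update so that user-specified fixed regions are never rewritten by the monitoring logic. The copy-and-override pattern itself, with hypothetical types rather than the DAMON API:

#include <stdio.h>

struct ops {
	int id;
	void (*init)(void);
	void (*update)(void);
};

static void generic_init(void)   { puts("init"); }
static void generic_update(void) { puts("update"); }

int main(void)
{
	struct ops vaddr = {
		.id = 0,
		.init = generic_init,
		.update = generic_update,
	};
	struct ops fvaddr = vaddr;	/* struct assignment copies all members */

	fvaddr.id = 1;
	fvaddr.init = NULL;	/* fixed ranges: never (re)set regions */
	fvaddr.update = NULL;	/* and never auto-adjust them */

	printf("fvaddr init cleared: %s\n", fvaddr.init ? "no" : "yes");
	return 0;
}

Registering both sets from one initcall also means a failure on the first damon_register_ops() call returns before the second set is attempted, as the code above does.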
734                                                   662 
735 #include "tests/vaddr-kunit.h"                 !! 663 #include "vaddr-test.h"
736                                                   664 
