// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "ops-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/*
 * 't->pid' should be the pointer to the relevant 'struct pid' having reference
 * count.  Caller must put the returned task, unless it is NULL.
 */
static inline struct task_struct *damon_get_task_struct(struct damon_target *t)
{
        return get_pid_task(t->pid, PIDTYPE_PID);
}

/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
        struct task_struct *task;
        struct mm_struct *mm;

        task = damon_get_task_struct(t);
        if (!task)
                return NULL;

        mm = get_task_mm(task);
        put_task_struct(task);
        return mm;
}

/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int damon_va_evenly_split_region(struct damon_target *t,
                struct damon_region *r, unsigned int nr_pieces)
{
        unsigned long sz_orig, sz_piece, orig_end;
        struct damon_region *n = NULL, *next;
        unsigned long start;

        if (!r || !nr_pieces)
                return -EINVAL;

        orig_end = r->ar.end;
        sz_orig = damon_sz_region(r);
        sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

        if (!sz_piece)
                return -EINVAL;

        r->ar.end = r->ar.start + sz_piece;
        next = damon_next_region(r);
        for (start = r->ar.end; start + sz_piece <= orig_end;
                        start += sz_piece) {
                n = damon_new_region(start, start + sz_piece);
                if (!n)
                        return -ENOMEM;
                damon_insert_region(n, r, next, t);
                r = n;
        }
        /* complement last region for possible rounding error */
        if (n)
                n->ar.end = orig_end;

        return 0;
}

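/* Return the size of the given address range, in bytes */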
static unsigned long sz_range(struct damon_addr_range *r)
{
        return r->end - r->start;
}

/*
 * Find three regions separated by two biggest unmapped regions
 *
 * mm           the mm_struct of the target address space
 * regions      an array of three address ranges in which the results will be
 *              saved
 *
 * This function receives an address space and finds three regions in it which
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to the comments of the '__damon_va_init_regions()' function below to
 * know why this is necessary.
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int __damon_va_three_regions(struct mm_struct *mm,
                struct damon_addr_range regions[3])
{
        struct damon_addr_range first_gap = {0}, second_gap = {0};
        VMA_ITERATOR(vmi, mm, 0);
        struct vm_area_struct *vma, *prev = NULL;
        unsigned long start;

        /*
         * Find the two biggest gaps so that first_gap > second_gap > others.
         * If this is too slow, it can be optimised to examine the maple
         * tree gaps.
         */
        rcu_read_lock();
        for_each_vma(vmi, vma) {
                unsigned long gap;

                if (!prev) {
                        start = vma->vm_start;
                        goto next;
                }
                gap = vma->vm_start - prev->vm_end;

                if (gap > sz_range(&first_gap)) {
                        second_gap = first_gap;
                        first_gap.start = prev->vm_end;
                        first_gap.end = vma->vm_start;
                } else if (gap > sz_range(&second_gap)) {
                        second_gap.start = prev->vm_end;
                        second_gap.end = vma->vm_start;
                }
next:
                prev = vma;
        }
        rcu_read_unlock();

        if (!sz_range(&second_gap) || !sz_range(&first_gap))
                return -EINVAL;

        /* Sort the two biggest gaps by address */
        if (first_gap.start > second_gap.start)
                swap(first_gap, second_gap);

        /* Store the result */
        regions[0].start = ALIGN(start, DAMON_MIN_REGION);
        regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
        regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
        regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
        regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
        regions[2].end = ALIGN(prev->vm_end, DAMON_MIN_REGION);

        return 0;
}

/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
                struct damon_addr_range regions[3])
{
        struct mm_struct *mm;
        int rc;

        mm = damon_get_mm(t);
        if (!mm)
                return -EINVAL;

        mmap_read_lock(mm);
        rc = __damon_va_three_regions(mm, regions);
        mmap_read_unlock(mm);

        mmput(mm);
        return rc;
}

/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t    the given target
 *
 * Because only a number of small portions of the entire address space
 * are actually mapped to memory and accessed, monitoring the unmapped
 * regions is wasteful.  That said, because we can deal with small noise,
 * tracking every mapping is not strictly required but could even incur a high
 * overhead if the mapping frequently changes or the number of mappings is
 * high.  The adaptive regions adjustment mechanism will further help to deal
 * with the noise by simply identifying the unmapped areas as a region that
 * has no access.  Moreover, applying the real mappings that would have many
 * unmapped areas inside will make the adaptive mechanism quite complex.
 * However, too huge unmapped areas inside the monitoring target should be
 * removed to not take the time for the adaptive mechanism.
 *
 * For this reason, we convert the complex mappings to three distinct regions
 * that cover every mapped area of the address space.  Also, the two gaps
 * between the three regions are the two biggest unmapped areas in the given
 * address space.  In detail, this function first identifies the start and the
 * end of the mappings and the two biggest unmapped areas of the address space.
 * Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * As the usual memory map of processes is as below, the gap between the heap
 * and the uppermost mmap()-ed region, and the gap between the lowermost
 * mmap()-ed region and the stack will be the two biggest unmapped regions.
 * Because these gaps are exceptionally huge in usual address spaces, excluding
 * these two biggest unmapped regions will be sufficient to make a trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
                struct damon_target *t)
{
        struct damon_target *ti;
        struct damon_region *r;
        struct damon_addr_range regions[3];
        unsigned long sz = 0, nr_pieces;
        int i, tidx = 0;

        if (damon_va_three_regions(t, regions)) {
                damon_for_each_target(ti, ctx) {
                        if (ti == t)
                                break;
                        tidx++;
                }
                pr_debug("Failed to get three regions of %dth target\n", tidx);
                return;
        }

        for (i = 0; i < 3; i++)
                sz += regions[i].end - regions[i].start;
        if (ctx->attrs.min_nr_regions)
                sz /= ctx->attrs.min_nr_regions;
        if (sz < DAMON_MIN_REGION)
                sz = DAMON_MIN_REGION;

        /* Set the initial three regions of the target */
        for (i = 0; i < 3; i++) {
                r = damon_new_region(regions[i].start, regions[i].end);
                if (!r) {
                        pr_err("%d'th init region creation failed\n", i);
                        return;
                }
                damon_add_region(r, t);

                nr_pieces = (regions[i].end - regions[i].start) / sz;
                damon_va_evenly_split_region(t, r, nr_pieces);
        }
}

/* Initialize '->regions_list' of every target (task) */
static void damon_va_init(struct damon_ctx *ctx)
{
        struct damon_target *t;

        damon_for_each_target(t, ctx) {
                /* the user may set the target regions as they want */
                if (!damon_nr_regions(t))
                        __damon_va_init_regions(ctx, t);
        }
}

/*
 * Update regions for current memory mappings
 */
static void damon_va_update(struct damon_ctx *ctx)
{
        struct damon_addr_range three_regions[3];
        struct damon_target *t;

        damon_for_each_target(t, ctx) {
                if (damon_va_three_regions(t, three_regions))
                        continue;
                damon_set_regions(t, three_regions, 3);
        }
}

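/*
 * Page table walk callback for clearing the accessed bit of the page mapped
 * at 'addr'.  Handles both PMD-mapped huge pages and regular PTE-mapped
 * pages, and retries the walk if the PTE table could not be locked.
 */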
static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
                unsigned long next, struct mm_walk *walk)
{
        pte_t *pte;
        pmd_t pmde;
        spinlock_t *ptl;

        if (pmd_trans_huge(pmdp_get(pmd))) {
                ptl = pmd_lock(walk->mm, pmd);
                pmde = pmdp_get(pmd);

                if (!pmd_present(pmde)) {
                        spin_unlock(ptl);
                        return 0;
                }

                if (pmd_trans_huge(pmde)) {
                        damon_pmdp_mkold(pmd, walk->vma, addr);
                        spin_unlock(ptl);
                        return 0;
                }
                spin_unlock(ptl);
        }

        pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        if (!pte) {
                walk->action = ACTION_AGAIN;
                return 0;
        }
        if (!pte_present(ptep_get(pte)))
                goto out;
        damon_ptep_mkold(pte, walk->vma, addr);
out:
        pte_unmap_unlock(pte, ptl);
        return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
                                struct vm_area_struct *vma, unsigned long addr)
{
        bool referenced = false;
        pte_t entry = huge_ptep_get(mm, addr, pte);
        struct folio *folio = pfn_folio(pte_pfn(entry));
        unsigned long psize = huge_page_size(hstate_vma(vma));

        folio_get(folio);

        if (pte_young(entry)) {
                referenced = true;
                entry = pte_mkold(entry);
                set_huge_pte_at(mm, addr, pte, entry, psize);
        }

#ifdef CONFIG_MMU_NOTIFIER
        if (mmu_notifier_clear_young(mm, addr,
                                     addr + huge_page_size(hstate_vma(vma))))
                referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

        if (referenced)
                folio_set_young(folio);

        folio_set_idle(folio);
        folio_put(folio);
}

static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                     unsigned long addr, unsigned long end,
                                     struct mm_walk *walk)
{
        struct hstate *h = hstate_vma(walk->vma);
        spinlock_t *ptl;
        pte_t entry;

        ptl = huge_pte_lock(h, walk->mm, pte);
        entry = huge_ptep_get(walk->mm, addr, pte);
        if (!pte_present(entry))
                goto out;

        damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr);

out:
        spin_unlock(ptl);
        return 0;
}
#else
#define damon_mkold_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_mkold_ops = {
        .pmd_entry = damon_mkold_pmd_entry,
        .hugetlb_entry = damon_mkold_hugetlb_entry,
        .walk_lock = PGWALK_RDLOCK,
};

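/*
 * Clear the accessed bit of the page that maps to 'addr' in 'mm', under the
 * mmap read lock, using the 'damon_mkold_ops' page table walk.
 */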
static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
        mmap_read_lock(mm);
        walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
        mmap_read_unlock(mm);
}

/*
 * Functions for the access checking of the regions
 */

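/*
 * Pick a random sampling address inside the region and make the page mapped
 * at that address 'old', so that the next access to it can be detected.
 */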
static void __damon_va_prepare_access_check(struct mm_struct *mm,
                struct damon_region *r)
{
        r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

        damon_va_mkold(mm, r->sampling_addr);
}

static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
        struct damon_target *t;
        struct mm_struct *mm;
        struct damon_region *r;

        damon_for_each_target(t, ctx) {
                mm = damon_get_mm(t);
                if (!mm)
                        continue;
                damon_for_each_region(r, t)
                        __damon_va_prepare_access_check(mm, r);
                mmput(mm);
        }
}

struct damon_young_walk_private {
        /* size of the folio for the access checked virtual memory address */
        unsigned long *folio_sz;
        bool young;
};

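/*
 * Page table walk callback for checking whether the page mapped at 'addr'
 * has been accessed since the last damon_va_mkold().  Sets '->young' of the
 * walk private data and records the size of the backing folio.
 */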
static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
                unsigned long next, struct mm_walk *walk)
{
        pte_t *pte;
        pte_t ptent;
        spinlock_t *ptl;
        struct folio *folio;
        struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        if (pmd_trans_huge(pmdp_get(pmd))) {
                pmd_t pmde;

                ptl = pmd_lock(walk->mm, pmd);
                pmde = pmdp_get(pmd);

                if (!pmd_present(pmde)) {
                        spin_unlock(ptl);
                        return 0;
                }

                if (!pmd_trans_huge(pmde)) {
                        spin_unlock(ptl);
                        goto regular_page;
                }
                folio = damon_get_folio(pmd_pfn(pmde));
                if (!folio)
                        goto huge_out;
                if (pmd_young(pmde) || !folio_test_idle(folio) ||
                                mmu_notifier_test_young(walk->mm, addr))
                        priv->young = true;
                *priv->folio_sz = HPAGE_PMD_SIZE;
                folio_put(folio);
huge_out:
                spin_unlock(ptl);
                return 0;
        }

regular_page:
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

        pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        if (!pte) {
                walk->action = ACTION_AGAIN;
                return 0;
        }
        ptent = ptep_get(pte);
        if (!pte_present(ptent))
                goto out;
        folio = damon_get_folio(pte_pfn(ptent));
        if (!folio)
                goto out;
        if (pte_young(ptent) || !folio_test_idle(folio) ||
                        mmu_notifier_test_young(walk->mm, addr))
                priv->young = true;
        *priv->folio_sz = folio_size(folio);
        folio_put(folio);
out:
        pte_unmap_unlock(pte, ptl);
        return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                     unsigned long addr, unsigned long end,
                                     struct mm_walk *walk)
{
        struct damon_young_walk_private *priv = walk->private;
        struct hstate *h = hstate_vma(walk->vma);
        struct folio *folio;
        spinlock_t *ptl;
        pte_t entry;

        ptl = huge_pte_lock(h, walk->mm, pte);
        entry = huge_ptep_get(walk->mm, addr, pte);
        if (!pte_present(entry))
                goto out;

        folio = pfn_folio(pte_pfn(entry));
        folio_get(folio);

        if (pte_young(entry) || !folio_test_idle(folio) ||
            mmu_notifier_test_young(walk->mm, addr))
                priv->young = true;
        *priv->folio_sz = huge_page_size(h);

        folio_put(folio);

out:
        spin_unlock(ptl);
        return 0;
}
#else
#define damon_young_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_young_ops = {
        .pmd_entry = damon_young_pmd_entry,
        .hugetlb_entry = damon_young_hugetlb_entry,
        .walk_lock = PGWALK_RDLOCK,
};

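/*
 * Check whether the page of the given address in the given mm has been
 * accessed since the last mkold, and store the size of the folio backing the
 * address in '*folio_sz'.
 */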
static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
                unsigned long *folio_sz)
{
        struct damon_young_walk_private arg = {
                .folio_sz = folio_sz,
                .young = false,
        };

        mmap_read_lock(mm);
        walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
        mmap_read_unlock(mm);
        return arg.young;
}

/*
 * Check whether the region was accessed after the last preparation
 *
 * mm   'mm_struct' for the given virtual address space
 * r    the region to be checked
 */
static void __damon_va_check_access(struct mm_struct *mm,
                                struct damon_region *r, bool same_target,
                                struct damon_attrs *attrs)
{
        static unsigned long last_addr;
        static unsigned long last_folio_sz = PAGE_SIZE;
        static bool last_accessed;

        if (!mm) {
                damon_update_region_access_rate(r, false, attrs);
                return;
        }

        /* If the region is in the last checked page, reuse the result */
        if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
                                ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
                damon_update_region_access_rate(r, last_accessed, attrs);
                return;
        }

        last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
        damon_update_region_access_rate(r, last_accessed, attrs);

        last_addr = r->sampling_addr;
}

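/*
 * Check the access status of every region of every target and update the
 * regions' access rates.  Returns the maximum 'nr_accesses' of the regions.
 */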
static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
        struct damon_target *t;
        struct mm_struct *mm;
        struct damon_region *r;
        unsigned int max_nr_accesses = 0;
        bool same_target;

        damon_for_each_target(t, ctx) {
                mm = damon_get_mm(t);
                same_target = false;
                damon_for_each_region(r, t) {
                        __damon_va_check_access(mm, r, same_target,
                                        &ctx->attrs);
                        max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
                        same_target = true;
                }
                if (mm)
                        mmput(mm);
        }

        return max_nr_accesses;
}

/*
 * Functions for the target validity check and cleanup
 */

static bool damon_va_target_valid(struct damon_target *t)
{
        struct task_struct *task;

        task = damon_get_task_struct(t);
        if (task) {
                put_task_struct(task);
                return true;
        }

        return false;
}

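/*
 * Apply the given madvise() 'behavior' to the address range of the region.
 * Returns the number of bytes the advice was applied to, or zero if madvise
 * is not available (CONFIG_ADVISE_SYSCALLS unset) or the call failed.
 */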
#ifndef CONFIG_ADVISE_SYSCALLS
static unsigned long damos_madvise(struct damon_target *target,
                struct damon_region *r, int behavior)
{
        return 0;
}
#else
static unsigned long damos_madvise(struct damon_target *target,
                struct damon_region *r, int behavior)
{
        struct mm_struct *mm;
        unsigned long start = PAGE_ALIGN(r->ar.start);
        unsigned long len = PAGE_ALIGN(damon_sz_region(r));
        unsigned long applied;

        mm = damon_get_mm(target);
        if (!mm)
                return 0;

        applied = do_madvise(mm, start, len, behavior) ? 0 : len;
        mmput(mm);

        return applied;
}
#endif /* CONFIG_ADVISE_SYSCALLS */

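/*
 * Apply the action of the given DAMOS scheme to the region by translating it
 * into the corresponding madvise() behavior.  Returns the number of bytes the
 * action was applied to, or zero for DAMOS_STAT and unsupported actions.
 */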
static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
                struct damon_target *t, struct damon_region *r,
                struct damos *scheme)
{
        int madv_action;

        switch (scheme->action) {
        case DAMOS_WILLNEED:
                madv_action = MADV_WILLNEED;
                break;
        case DAMOS_COLD:
                madv_action = MADV_COLD;
                break;
        case DAMOS_PAGEOUT:
                madv_action = MADV_PAGEOUT;
                break;
        case DAMOS_HUGEPAGE:
                madv_action = MADV_HUGEPAGE;
                break;
        case DAMOS_NOHUGEPAGE:
                madv_action = MADV_NOHUGEPAGE;
                break;
        case DAMOS_STAT:
                return 0;
        default:
                /*
                 * DAMOS actions that are not yet supported by 'vaddr'.
                 */
                return 0;
        }

        return damos_madvise(t, r, madv_action);
}

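/*
 * Return the priority score of the region for the given scheme.  Only
 * DAMOS_PAGEOUT uses the coldness based score; every other action gets
 * DAMOS_MAX_SCORE.
 */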
static int damon_va_scheme_score(struct damon_ctx *context,
                struct damon_target *t, struct damon_region *r,
                struct damos *scheme)
{

        switch (scheme->action) {
        case DAMOS_PAGEOUT:
                return damon_cold_score(context, r, scheme);
        default:
                break;
        }

        return DAMOS_MAX_SCORE;
}

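/*
 * Register the virtual address space monitoring operations, for both the
 * normal 'vaddr' case and the fixed virtual address ranges ('fvaddr') case
 * that skips the automatic region initialization and update.
 */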
static int __init damon_va_initcall(void)
{
        struct damon_operations ops = {
                .id = DAMON_OPS_VADDR,
                .init = damon_va_init,
                .update = damon_va_update,
                .prepare_access_checks = damon_va_prepare_access_checks,
                .check_accesses = damon_va_check_accesses,
                .reset_aggregated = NULL,
                .target_valid = damon_va_target_valid,
                .cleanup = NULL,
                .apply_scheme = damon_va_apply_scheme,
                .get_scheme_score = damon_va_scheme_score,
        };
        /* ops for fixed virtual address ranges */
        struct damon_operations ops_fvaddr = ops;
        int err;

        /* Don't set the monitoring target regions for the entire mapping */
        ops_fvaddr.id = DAMON_OPS_FVADDR;
        ops_fvaddr.init = NULL;
        ops_fvaddr.update = NULL;

        err = damon_register_ops(&ops);
        if (err)
                return err;
        return damon_register_ops(&ops_fvaddr);
};

subsys_initcall(damon_va_initcall);

#include "tests/vaddr-kunit.h"