// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/page_ext.h>
#include <linux/page_idle.h>

#include "internal.h"

#define BITMAP_CHUNK_SIZE	sizeof(u64)
#define BITMAP_CHUNK_BITS	(BITMAP_CHUNK_SIZE * BITS_PER_BYTE)

/*
 * Idle page tracking only considers user memory pages; for other types of
 * pages the idle flag is always unset and an attempt to set it is silently
 * ignored.
 *
 * We treat a page as a user memory page if it is on an LRU list, because it is
 * always safe to pass such a page to rmap_walk(), which is essential for idle
 * page tracking. With such an indicator of user pages we can skip isolated
 * pages, but since there are not usually many of them, it will hardly affect
 * the overall result.
 *
 * This function tries to get a user memory page by pfn as described above.
 */
static struct folio *page_idle_get_folio(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);
	struct folio *folio;

	if (!page || PageTail(page))
		return NULL;

	folio = page_folio(page);
	if (!folio_test_lru(folio) || !folio_try_get(folio))
		return NULL;
	if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
		folio_put(folio);
		folio = NULL;
	}
	return folio;
}

static bool page_idle_clear_pte_refs_one(struct folio *folio,
					struct vm_area_struct *vma,
					unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
	bool referenced = false;

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			/*
			 * If one sub page of a PTE-mapped THP is referenced,
			 * the whole THP is considered referenced.
			 */
			if (ptep_clear_young_notify(vma, addr, pvmw.pte))
				referenced = true;
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
				referenced = true;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}
	}

	if (referenced) {
		folio_clear_idle(folio);
		/*
		 * We cleared the referenced bit in a mapping to this page. To
		 * avoid interference with page reclaim, mark it young so that
		 * folio_referenced() will return > 0.
		 */
		folio_set_young(folio);
	}
	return true;
}

static void page_idle_clear_pte_refs(struct folio *folio)
{
	/*
	 * Since rwc.try_lock is unused, rwc is effectively immutable, so we
	 * can make it static to save some cycles and stack.
	 */
	static struct rmap_walk_control rwc = {
		.rmap_one = page_idle_clear_pte_refs_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio))
		return;

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
}

static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t pos, size_t count)
{
	u64 *out = (u64 *)buf;
	struct folio *folio;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return 0;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = max_pfn;

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if (!bit)
			*out = 0ULL;
		folio = page_idle_get_folio(pfn);
		if (folio) {
			if (folio_test_idle(folio)) {
				/*
				 * The page might have been referenced via a
				 * pte, in which case it is not idle. Clear
				 * refs and recheck.
				 */
				page_idle_clear_pte_refs(folio);
				if (folio_test_idle(folio))
					*out |= 1ULL << bit;
			}
			folio_put(folio);
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			out++;
		cond_resched();
	}
	return (char *)out - buf;
}

static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
				      struct bin_attribute *attr, char *buf,
				      loff_t pos, size_t count)
{
	const u64 *in = (u64 *)buf;
	struct folio *folio;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return -ENXIO;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = max_pfn;

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if ((*in >> bit) & 1) {
			folio = page_idle_get_folio(pfn);
			if (folio) {
				page_idle_clear_pte_refs(folio);
				folio_set_idle(folio);
				folio_put(folio);
			}
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			in++;
		cond_resched();
	}
	return (char *)in - buf;
}

static struct bin_attribute page_idle_bitmap_attr =
		__BIN_ATTR(bitmap, 0600,
			   page_idle_bitmap_read, page_idle_bitmap_write, 0);

static struct bin_attribute *page_idle_bin_attrs[] = {
	&page_idle_bitmap_attr,
	NULL,
};

static const struct attribute_group page_idle_attr_group = {
	.bin_attrs	= page_idle_bin_attrs,
	.name		= "page_idle",
};

static int __init page_idle_init(void)
{
	int err;

	err = sysfs_create_group(mm_kobj, &page_idle_attr_group);
	if (err) {
		pr_err("page_idle: register sysfs failed\n");
		return err;
	}
	return 0;
}
subsys_initcall(page_idle_init);
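The bitmap built above is exposed to userspace as /sys/kernel/mm/page_idle/bitmap (mm_kobj plus the "page_idle" group name and the "bitmap" attribute). Below is a minimal, hypothetical standalone sketch of how a consumer drives it; it is not part of the kernel file, and the program name page_idle_demo.c is assumed. Each u64 chunk of the file covers 64 pfns, and reads and writes must be whole, 8-byte-aligned chunks, matching the -EINVAL checks in the read/write handlers above.

/*
 * page_idle_demo.c: hypothetical userspace sketch (not part of the kernel
 * source above). Marks a pfn idle, waits, then re-reads its idle bit.
 * Requires root and CONFIG_IDLE_PAGE_TRACKING. Build: cc -o demo page_idle_demo.c
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define BITMAP_PATH	"/sys/kernel/mm/page_idle/bitmap"
#define CHUNK_BITS	64	/* pfns per u64 chunk, as in the kernel file */

/*
 * Mark the 64-pfn chunks covering [start_pfn, end_pfn) idle. Accesses must be
 * whole aligned u64 chunks, so neighbouring pfns in the same chunk are marked
 * too; the kernel silently ignores pfns that are not user memory pages.
 */
static int mark_idle(int fd, unsigned long start_pfn, unsigned long end_pfn)
{
	uint64_t chunk = ~0ULL;
	unsigned long pfn;

	for (pfn = start_pfn & ~(CHUNK_BITS - 1UL); pfn < end_pfn;
	     pfn += CHUNK_BITS) {
		off_t off = pfn / CHUNK_BITS * sizeof(chunk);

		if (pwrite(fd, &chunk, sizeof(chunk), off) != sizeof(chunk))
			return -1;
	}
	return 0;
}

/* Return 1 if @pfn is still idle, 0 if it was referenced, -1 on error. */
static int is_idle(int fd, unsigned long pfn)
{
	uint64_t chunk;
	off_t off = pfn / CHUNK_BITS * sizeof(chunk);

	if (pread(fd, &chunk, sizeof(chunk), off) != sizeof(chunk))
		return -1;
	return (chunk >> (pfn % CHUNK_BITS)) & 1;
}

int main(int argc, char **argv)
{
	unsigned long pfn;
	int fd, idle;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pfn>\n", argv[0]);
		return 1;
	}
	pfn = strtoul(argv[1], NULL, 0);

	fd = open(BITMAP_PATH, O_RDWR);	/* 0600, so root only */
	if (fd < 0) {
		perror(BITMAP_PATH);
		return 1;
	}

	if (mark_idle(fd, pfn, pfn + 1)) {
		perror("pwrite");
		return 1;
	}

	sleep(1);	/* sampling interval while the workload runs */

	idle = is_idle(fd, pfn);
	if (idle < 0)
		perror("pread");
	else
		printf("pfn %lu is %s\n", pfn, idle ? "idle" : "referenced");

	close(fd);
	return idle < 0;
}

In practice a monitor first resolves the pfns backing a process's mappings by walking /proc/<pid>/pagemap, then applies the mark/recheck cycle above; because set requests for non-user pages are silently ignored, blanket all-ones writes over a pfn range are safe.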