// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)

static inline unsigned long get_max_dump_pfn(void)
{
#ifdef CONFIG_SPARSEMEM
	/*
	 * The memmap of early sections is completely populated and marked
	 * online even if max_pfn does not fall on a section boundary -
	 * pfn_to_online_page() will succeed on all pages. Allow inspecting
	 * these memmaps.
	 */
	return round_up(max_pfn, PAGES_PER_SECTION);
#else
	return max_pfn;
#endif
}
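
/*
 * Each of the files below is a flat array of u64 entries indexed by
 * pfn: the entry for a given pfn lives at file offset pfn * KPMSIZE,
 * so with KPMSIZE == 8 the entry for pfn 0x1000 sits at offset 0x8000.
 */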

/* /proc/kpagecount - an array exposing page mapcounts
 *
 * Each entry is a u64 representing the corresponding
 * physical page mapcount.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		struct page *page;
		u64 mapcount = 0;

		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		page = pfn_to_online_page(pfn);
		if (page)
			mapcount = folio_precise_page_mapcount(page_folio(page),
							       page);

		if (put_user(mapcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecount_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecount_read,
};
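
/*
 * Minimal userspace sketch for /proc/kpagecount (illustrative only;
 * requires root, error handling omitted).  "pfn" is assumed to come
 * from /proc/pid/pagemap or a similar source:
 *
 *	uint64_t mapcount;
 *	int fd = open("/proc/kpagecount", O_RDONLY);
 *
 *	if (pread(fd, &mapcount, sizeof(mapcount),
 *		  pfn * sizeof(mapcount)) == sizeof(mapcount))
 *		printf("pfn %lu is mapped %llu time(s)\n",
 *		       pfn, (unsigned long long)mapcount);
 */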

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

u64 stable_page_flags(const struct page *page)
{
	const struct folio *folio;
	unsigned long k;
	unsigned long mapping;
	bool is_anon;
	u64 u = 0;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;
	folio = page_folio(page);

	k = folio->flags;
	mapping = (unsigned long)folio->mapping;
	is_anon = mapping & PAGE_MAPPING_ANON;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 */
	if (page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (is_anon) {
		u |= 1 << KPF_ANON;
		if (mapping & PAGE_MAPPING_KSM)
			u |= 1 << KPF_KSM;
	}

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (page == &folio->page)
		u |= kpf_copy_bit(k, KPF_COMPOUND_HEAD, PG_head);
	else
		u |= 1 << KPF_COMPOUND_TAIL;
	if (folio_test_hugetlb(folio))
		u |= 1 << KPF_HUGE;
	else if (folio_test_large(folio) &&
		 folio_test_large_rmappable(folio)) {
		/* Note: we indicate any THPs here, not just PMD-sized ones */
		u |= 1 << KPF_THP;
	} else if (is_huge_zero_folio(folio)) {
		u |= 1 << KPF_ZERO_PAGE;
		u |= 1 << KPF_THP;
	} else if (is_zero_folio(folio)) {
		u |= 1 << KPF_ZERO_PAGE;
	}
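
	/*
	 * Example: for an order-2 compound page starting at pfn P,
	 * KPF_COMPOUND_HEAD is reported at P and KPF_COMPOUND_TAIL at
	 * P+1..P+3; together the two bits let userspace recover the
	 * start, end and order of the compound page.
	 */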

	/*
	 * Caveats on high order pages: PG_buddy and PG_slab will only be set
	 * on the head page.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;
	else if (page_count(page) == 0 && is_free_buddy_page(page))
		u |= 1 << KPF_BUDDY;

	if (PageOffline(page))
		u |= 1 << KPF_OFFLINE;
	if (PageTable(page))
		u |= 1 << KPF_PGTABLE;
	if (folio_test_slab(folio))
		u |= 1 << KPF_SLAB;

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	u |= kpf_copy_bit(k, KPF_IDLE,		PG_idle);
#else
	if (folio_test_idle(folio))
		u |= 1 << KPF_IDLE;
#endif

	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);
	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE,	PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK,	PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU,		PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED,	PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);

#define SWAPCACHE ((1 << PG_swapbacked) | (1 << PG_swapcache))
	if ((k & SWAPCACHE) == SWAPCACHE)
		u |= 1 << KPF_SWAPCACHE;
	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED,	PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	if (u & (1 << KPF_HUGE))
		u |= kpf_copy_bit(k, KPF_HWPOISON,	PG_hwpoison);
	else
		u |= kpf_copy_bit(page->flags, KPF_HWPOISON,	PG_hwpoison);
#endif
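
	/*
	 * In the block above, hugetlb poisoning is reflected in the folio
	 * (head page) flags, while for all other pages PG_hwpoison is
	 * tracked per page, hence the raw page->flags word is used.
	 */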

	u |= kpf_copy_bit(k, KPF_RESERVED,	PG_reserved);
	u |= kpf_copy_bit(k, KPF_OWNER_2,	PG_owner_2);
	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);
#ifdef CONFIG_ARCH_USES_PG_ARCH_2
	u |= kpf_copy_bit(k, KPF_ARCH_2,	PG_arch_2);
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_3
	u |= kpf_copy_bit(k, KPF_ARCH_3,	PG_arch_3);
#endif

	return u;
};

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		struct page *page = pfn_to_online_page(pfn);

		if (put_user(stable_page_flags(page), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpageflags_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpageflags_read,
};
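
/*
 * Userspace sketch combining /proc/self/pagemap with /proc/kpageflags
 * (illustrative only, requires root): translate a virtual address
 * "vaddr" to a pfn, then test a KPF_* bit number from the uapi header
 * <linux/kernel-page-flags.h>.
 *
 *	uint64_t pme, flags;
 *	long psize = sysconf(_SC_PAGESIZE);
 *	int pm = open("/proc/self/pagemap", O_RDONLY);
 *	int kf = open("/proc/kpageflags", O_RDONLY);
 *
 *	pread(pm, &pme, 8, (vaddr / psize) * 8);
 *	if (pme & (1ULL << 63)) {			// bit 63: present
 *		uint64_t pfn = pme & ((1ULL << 55) - 1);	// bits 0..54
 *
 *		pread(kf, &flags, 8, pfn * 8);
 *		if (flags & (1ULL << KPF_THP))
 *			puts("backed by a THP");
 *	}
 */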

#ifdef CONFIG_MEMCG
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 ino;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (ppage)
			ino = page_cgroup_ino(ppage);
		else
			ino = 0;

		if (put_user(ino, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecgroup_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
	proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
#ifdef CONFIG_MEMCG
	proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
#endif
	return 0;
}
fs_initcall(proc_page_init);
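
/*
 * Userspace sketch for /proc/kpagecgroup (illustrative only): each
 * entry is the inode number of the memory cgroup the page is charged
 * to (0 for holes and uncharged pages), so the owning cgroup can be
 * found by matching the value against st_ino of directories under a
 * cgroup2 mount such as /sys/fs/cgroup:
 *
 *	uint64_t ino;
 *	int fd = open("/proc/kpagecgroup", O_RDONLY);
 *
 *	pread(fd, &ino, 8, pfn * 8);
 */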