// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)

static inline unsigned long get_max_dump_pfn(void)
{
#ifdef CONFIG_SPARSEMEM
	/*
	 * The memmap of early sections is completely populated and marked
	 * online even if max_pfn does not fall on a section boundary -
	 * pfn_to_online_page() will succeed on all pages. Allow inspecting
	 * these memmaps.
	 */
	return round_up(max_pfn, PAGES_PER_SECTION);
#else
	return max_pfn;
#endif
}

/* /proc/kpagecount - an array exposing page mapcounts
 *
 * Each entry is a u64 representing the corresponding
 * physical page mapcount.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		struct page *page;
		u64 mapcount = 0;

		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		page = pfn_to_online_page(pfn);
		if (page)
			mapcount = folio_precise_page_mapcount(page_folio(page),
							       page);

		if (put_user(mapcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecount_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecount_read,
};

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

u64 stable_page_flags(const struct page *page)
{
	const struct folio *folio;
	unsigned long k;
	unsigned long mapping;
	bool is_anon;
	u64 u = 0;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;
	folio = page_folio(page);

	k = folio->flags;
	mapping = (unsigned long)folio->mapping;
	is_anon = mapping & PAGE_MAPPING_ANON;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 */
	if (page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (is_anon) {
		u |= 1 << KPF_ANON;
		if (mapping & PAGE_MAPPING_KSM)
			u |= 1 << KPF_KSM;
	}

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (page == &folio->page)
		u |= kpf_copy_bit(k, KPF_COMPOUND_HEAD, PG_head);
	else
		u |= 1 << KPF_COMPOUND_TAIL;
	if (folio_test_hugetlb(folio))
		u |= 1 << KPF_HUGE;
	else if (folio_test_large(folio) &&
		 folio_test_large_rmappable(folio)) {
		/* Note: we indicate any THPs here, not just PMD-sized ones */
		u |= 1 << KPF_THP;
	} else if (is_huge_zero_folio(folio)) {
		u |= 1 << KPF_ZERO_PAGE;
		u |= 1 << KPF_THP;
	} else if (is_zero_folio(folio)) {
		u |= 1 << KPF_ZERO_PAGE;
	}

	/*
	 * Caveats on high order pages: PG_buddy and PG_slab will only be set
	 * on the head page.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;
	else if (page_count(page) == 0 && is_free_buddy_page(page))
		u |= 1 << KPF_BUDDY;

	if (PageOffline(page))
		u |= 1 << KPF_OFFLINE;
	if (PageTable(page))
		u |= 1 << KPF_PGTABLE;
	if (folio_test_slab(folio))
		u |= 1 << KPF_SLAB;

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	u |= kpf_copy_bit(k, KPF_IDLE,		PG_idle);
#else
	if (folio_test_idle(folio))
		u |= 1 << KPF_IDLE;
#endif

	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);
	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE,	PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK,	PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU,		PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED,	PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);

#define SWAPCACHE ((1 << PG_swapbacked) | (1 << PG_swapcache))
	if ((k & SWAPCACHE) == SWAPCACHE)
		u |= 1 << KPF_SWAPCACHE;
	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED,	PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	if (u & (1 << KPF_HUGE))
		u |= kpf_copy_bit(k, KPF_HWPOISON,	PG_hwpoison);
	else
		u |= kpf_copy_bit(page->flags, KPF_HWPOISON,	PG_hwpoison);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED,	PG_reserved);
	u |= kpf_copy_bit(k, KPF_OWNER_2,	PG_owner_2);
	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);
#ifdef CONFIG_ARCH_USES_PG_ARCH_2
	u |= kpf_copy_bit(k, KPF_ARCH_2,	PG_arch_2);
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_3
	u |= kpf_copy_bit(k, KPF_ARCH_3,	PG_arch_3);
#endif

	return u;
};

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		struct page *page = pfn_to_online_page(pfn);

		if (put_user(stable_page_flags(page), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpageflags_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpageflags_read,
};

#ifdef CONFIG_MEMCG
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 ino;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (ppage)
			ino = page_cgroup_ino(ppage);
		else
			ino = 0;

		if (put_user(ino, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecgroup_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
	proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
#ifdef CONFIG_MEMCG
	proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
#endif
	return 0;
}
fs_initcall(proc_page_init);
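/*
 * Usage sketch (not part of fs/proc/page.c): a minimal userspace reader
 * for the interfaces above. Each /proc file is a flat array of
 * native-endian u64 entries indexed by page frame number, so the entry
 * for PFN N lives at byte offset N * sizeof(u64); per the KPMMASK checks
 * above, both the offset and the read size must be multiples of 8 or the
 * kernel returns -EINVAL. The files are created with mode S_IRUSR, so
 * this must run as root, and /proc/kpagecgroup only exists when
 * CONFIG_MEMCG is enabled. The helper name read_pfn_entry() and the PFN
 * value below are illustrative choices, not kernel APIs.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int read_pfn_entry(const char *path, uint64_t pfn, uint64_t *val)
{
	int fd = open(path, O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	/* 8-byte read at an 8-byte-aligned offset, as KPMMASK requires. */
	n = pread(fd, val, sizeof(*val), (off_t)(pfn * sizeof(*val)));
	close(fd);
	return n == (ssize_t)sizeof(*val) ? 0 : -1;
}

int main(void)
{
	uint64_t pfn = 0x1000;	/* arbitrary example PFN */
	uint64_t mapcount, flags;

	if (read_pfn_entry("/proc/kpagecount", pfn, &mapcount) ||
	    read_pfn_entry("/proc/kpageflags", pfn, &flags)) {
		perror("read");
		return 1;
	}

	/*
	 * Flag bit positions are the KPF_* constants from
	 * include/uapi/linux/kernel-page-flags.h, as filled in by
	 * stable_page_flags() above.
	 */
	printf("pfn %#llx: mapcount=%llu flags=%#llx\n",
	       (unsigned long long)pfn, (unsigned long long)mapcount,
	       (unsigned long long)flags);
	return 0;
}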