
TOMOYO Linux Cross Reference
Linux/mm/memory.c


Diff markup

Differences between /mm/memory.c (Version linux-6.12-rc7) and /mm/memory.c (Version linux-4.13.16)
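
In the side-by-side listing below, the left column is linux-6.12-rc7 and the right column is linux-4.13.16. A "<<" gutter marker flags a line present only in 6.12-rc7, ">>" flags a line present only in 4.13.16, and "!!" flags a line that differs between the two; unmarked lines are identical in both versions.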


  1                                                << 
  2 // SPDX-License-Identifier: GPL-2.0-only       << 
  3 /*                                                  1 /*
  4  *  linux/mm/memory.c                               2  *  linux/mm/memory.c
  5  *                                                  3  *
  6  *  Copyright (C) 1991, 1992, 1993, 1994  Linu      4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  7  */                                                 5  */
  8                                                     6 
  9 /*                                                  7 /*
 10  * demand-loading started 01.12.91 - seems it       8  * demand-loading started 01.12.91 - seems it is high on the list of
 11  * things wanted, and it should be easy to imp      9  * things wanted, and it should be easy to implement. - Linus
 12  */                                                10  */
 13                                                    11 
 14 /*                                                 12 /*
 15  * Ok, demand-loading was easy, shared pages a     13  * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
 16  * pages started 02.12.91, seems to work. - Li     14  * pages started 02.12.91, seems to work. - Linus.
 17  *                                                 15  *
 18  * Tested sharing by executing about 30 /bin/s     16  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 19  * would have taken more than the 6M I have fr     17  * would have taken more than the 6M I have free, but it worked well as
 20  * far as I could see.                             18  * far as I could see.
 21  *                                                 19  *
 22  * Also corrected some "invalidate()"s - I was     20  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 23  */                                                21  */
 24                                                    22 
 25 /*                                                 23 /*
 26  * Real VM (paging to/from disk) started 18.12     24  * Real VM (paging to/from disk) started 18.12.91. Much more work and
 27  * thought has to go into this. Oh, well..         25  * thought has to go into this. Oh, well..
 28  * 19.12.91  -  works, somewhat. Sometimes I g     26  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 29  *              Found it. Everything seems to      27  *              Found it. Everything seems to work now.
 30  * 20.12.91  -  Ok, making the swap-device cha     28  * 20.12.91  -  Ok, making the swap-device changeable like the root.
 31  */                                                29  */
 32                                                    30 
 33 /*                                                 31 /*
 34  * 05.04.94  -  Multi-page memory management a     32  * 05.04.94  -  Multi-page memory management added for v1.1.
 35  *              Idea by Alex Bligh (alex@cconc     33  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
 36  *                                                 34  *
 37  * 16.07.99  -  Support of BIGMEM added by Ger     35  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 38  *              (Gerhard.Wichert@pdb.siemens.d     36  *              (Gerhard.Wichert@pdb.siemens.de)
 39  *                                                 37  *
 40  * Aug/Sep 2004 Changed to four level page tab     38  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 41  */                                                39  */
 42                                                    40 
 43 #include <linux/kernel_stat.h>                     41 #include <linux/kernel_stat.h>
 44 #include <linux/mm.h>                              42 #include <linux/mm.h>
 45 #include <linux/mm_inline.h>                   << 
 46 #include <linux/sched/mm.h>                        43 #include <linux/sched/mm.h>
 47 #include <linux/sched/coredump.h>                  44 #include <linux/sched/coredump.h>
 48 #include <linux/sched/numa_balancing.h>            45 #include <linux/sched/numa_balancing.h>
 49 #include <linux/sched/task.h>                      46 #include <linux/sched/task.h>
 50 #include <linux/hugetlb.h>                         47 #include <linux/hugetlb.h>
 51 #include <linux/mman.h>                            48 #include <linux/mman.h>
 52 #include <linux/swap.h>                            49 #include <linux/swap.h>
 53 #include <linux/highmem.h>                         50 #include <linux/highmem.h>
 54 #include <linux/pagemap.h>                         51 #include <linux/pagemap.h>
 55 #include <linux/memremap.h>                    << 
 56 #include <linux/kmsan.h>                       << 
 57 #include <linux/ksm.h>                             52 #include <linux/ksm.h>
 58 #include <linux/rmap.h>                            53 #include <linux/rmap.h>
 59 #include <linux/export.h>                          54 #include <linux/export.h>
 60 #include <linux/delayacct.h>                       55 #include <linux/delayacct.h>
 61 #include <linux/init.h>                            56 #include <linux/init.h>
 62 #include <linux/pfn_t.h>                           57 #include <linux/pfn_t.h>
 63 #include <linux/writeback.h>                       58 #include <linux/writeback.h>
 64 #include <linux/memcontrol.h>                      59 #include <linux/memcontrol.h>
 65 #include <linux/mmu_notifier.h>                    60 #include <linux/mmu_notifier.h>
                                                   >>  61 #include <linux/kallsyms.h>
 66 #include <linux/swapops.h>                         62 #include <linux/swapops.h>
 67 #include <linux/elf.h>                             63 #include <linux/elf.h>
 68 #include <linux/gfp.h>                             64 #include <linux/gfp.h>
 69 #include <linux/migrate.h>                         65 #include <linux/migrate.h>
 70 #include <linux/string.h>                          66 #include <linux/string.h>
 71 #include <linux/memory-tiers.h>                !!  67 #include <linux/dma-debug.h>
 72 #include <linux/debugfs.h>                         68 #include <linux/debugfs.h>
 73 #include <linux/userfaultfd_k.h>                   69 #include <linux/userfaultfd_k.h>
 74 #include <linux/dax.h>                             70 #include <linux/dax.h>
 75 #include <linux/oom.h>                             71 #include <linux/oom.h>
 76 #include <linux/numa.h>                        << 
 77 #include <linux/perf_event.h>                  << 
 78 #include <linux/ptrace.h>                      << 
 79 #include <linux/vmalloc.h>                     << 
 80 #include <linux/sched/sysctl.h>                << 
 81                                                << 
 82 #include <trace/events/kmem.h>                 << 
 83                                                    72 
 84 #include <asm/io.h>                                73 #include <asm/io.h>
 85 #include <asm/mmu_context.h>                       74 #include <asm/mmu_context.h>
 86 #include <asm/pgalloc.h>                           75 #include <asm/pgalloc.h>
 87 #include <linux/uaccess.h>                         76 #include <linux/uaccess.h>
 88 #include <asm/tlb.h>                               77 #include <asm/tlb.h>
 89 #include <asm/tlbflush.h>                          78 #include <asm/tlbflush.h>
                                                   >>  79 #include <asm/pgtable.h>
 90                                                    80 
 91 #include "pgalloc-track.h"                     << 
 92 #include "internal.h"                              81 #include "internal.h"
 93 #include "swap.h"                              << 
 94                                                    82 
 95 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) &&  !!  83 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
 96 #warning Unfortunate NUMA and NUMA Balancing c     84 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
 97 #endif                                             85 #endif
 98                                                    86 
 99 #ifndef CONFIG_NUMA                            !!  87 #ifndef CONFIG_NEED_MULTIPLE_NODES
                                                   >>  88 /* use the per-pgdat data instead for discontigmem - mbligh */
100 unsigned long max_mapnr;                           89 unsigned long max_mapnr;
101 EXPORT_SYMBOL(max_mapnr);                          90 EXPORT_SYMBOL(max_mapnr);
102                                                    91 
103 struct page *mem_map;                              92 struct page *mem_map;
104 EXPORT_SYMBOL(mem_map);                            93 EXPORT_SYMBOL(mem_map);
105 #endif                                             94 #endif
106                                                    95 
 107 static vm_fault_t do_fault(struct vm_fault *vmf);                        << 
 108 static vm_fault_t do_anonymous_page(struct vm_fault *vmf);               << 
 109 static bool vmf_pte_changed(struct vm_fault *vmf);                       << 
 110                                                                          << 
 111 /*                                                                       << 
 112  * Return true if the original pte was a uffd-wp pte marker (so the pte was << 
 113  * wr-protected).                                                        << 
 114  */                                                                      << 
 115 static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)   << 
 116 {                                                                        << 
 117         if (!userfaultfd_wp(vmf->vma))                                   << 
 118                 return false;                                            << 
 119         if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))                   << 
 120                 return false;                                            << 
 121                                                                          << 
 122         return pte_marker_uffd_wp(vmf->orig_pte);                        << 
 123 }                                                                        << 
124                                                << 
125 /*                                                 96 /*
126  * A number of key systems in x86 including io     97  * A number of key systems in x86 including ioremap() rely on the assumption
127  * that high_memory defines the upper bound on     98  * that high_memory defines the upper bound on direct map memory, then end
128  * of ZONE_NORMAL.                             !!  99  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
                                                   >> 100  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
                                                   >> 101  * and ZONE_HIGHMEM.
129  */                                               102  */
130 void *high_memory;                                103 void *high_memory;
131 EXPORT_SYMBOL(high_memory);                       104 EXPORT_SYMBOL(high_memory);
132                                                   105 
133 /*                                                106 /*
134  * Randomize the address space (stacks, mmaps,    107  * Randomize the address space (stacks, mmaps, brk, etc.).
135  *                                                108  *
136  * ( When CONFIG_COMPAT_BRK=y we exclude brk f    109  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
137  *   as ancient (libc5 based) binaries can seg    110  *   as ancient (libc5 based) binaries can segfault. )
138  */                                               111  */
139 int randomize_va_space __read_mostly =            112 int randomize_va_space __read_mostly =
140 #ifdef CONFIG_COMPAT_BRK                          113 #ifdef CONFIG_COMPAT_BRK
141                                         1;        114                                         1;
142 #else                                             115 #else
143                                         2;        116                                         2;
144 #endif                                            117 #endif
145                                                   118 
146 #ifndef arch_wants_old_prefaulted_pte          << 
 147 static inline bool arch_wants_old_prefaulted_pte(void)                   << 
 148 {                                                                        << 
 149         /*                                                               << 
 150          * Transitioning a PTE from 'old' to 'young' can be expensive on << 
 151          * some architectures, even if it's performed in hardware. By    << 
 152          * default, "false" means prefaulted entries will be 'young'.    << 
153          */                                    << 
154         return false;                          << 
155 }                                              << 
156 #endif                                         << 
157                                                << 
158 static int __init disable_randmaps(char *s)       119 static int __init disable_randmaps(char *s)
159 {                                                 120 {
160         randomize_va_space = 0;                   121         randomize_va_space = 0;
161         return 1;                                 122         return 1;
162 }                                                 123 }
163 __setup("norandmaps", disable_randmaps);          124 __setup("norandmaps", disable_randmaps);
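
A note on the knob above: booting with norandmaps is equivalent to writing 0 to /proc/sys/kernel/randomize_va_space at runtime. Value 2 (the usual default) randomizes stacks, mmaps and brk; 1 exempts brk for ancient libc5-based binaries; 0 disables VA-space randomization entirely.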
164                                                   125 
165 unsigned long zero_pfn __read_mostly;             126 unsigned long zero_pfn __read_mostly;
166 EXPORT_SYMBOL(zero_pfn);                          127 EXPORT_SYMBOL(zero_pfn);
167                                                   128 
168 unsigned long highest_memmap_pfn __read_mostly    129 unsigned long highest_memmap_pfn __read_mostly;
169                                                   130 
170 /*                                                131 /*
171  * CONFIG_MMU architectures set up ZERO_PAGE i    132  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
172  */                                               133  */
173 static int __init init_zero_pfn(void)             134 static int __init init_zero_pfn(void)
174 {                                                 135 {
175         zero_pfn = page_to_pfn(ZERO_PAGE(0));     136         zero_pfn = page_to_pfn(ZERO_PAGE(0));
176         return 0;                                 137         return 0;
177 }                                                 138 }
178 early_initcall(init_zero_pfn);                 !! 139 core_initcall(init_zero_pfn);
                                                   >> 140 
                                                   >> 141 
                                                   >> 142 #if defined(SPLIT_RSS_COUNTING)
                                                   >> 143 
                                                   >> 144 void sync_mm_rss(struct mm_struct *mm)
                                                   >> 145 {
                                                   >> 146         int i;
                                                   >> 147 
                                                   >> 148         for (i = 0; i < NR_MM_COUNTERS; i++) {
                                                   >> 149                 if (current->rss_stat.count[i]) {
                                                   >> 150                         add_mm_counter(mm, i, current->rss_stat.count[i]);
                                                   >> 151                         current->rss_stat.count[i] = 0;
                                                   >> 152                 }
                                                   >> 153         }
                                                   >> 154         current->rss_stat.events = 0;
                                                   >> 155 }
                                                   >> 156 
                                                   >> 157 static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
                                                   >> 158 {
                                                   >> 159         struct task_struct *task = current;
                                                   >> 160 
                                                   >> 161         if (likely(task->mm == mm))
                                                   >> 162                 task->rss_stat.count[member] += val;
                                                   >> 163         else
                                                   >> 164                 add_mm_counter(mm, member, val);
                                                   >> 165 }
                                                   >> 166 #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
                                                   >> 167 #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
                                                   >> 168 
                                                   >> 169 /* sync counter once per 64 page faults */
                                                   >> 170 #define TASK_RSS_EVENTS_THRESH  (64)
                                                   >> 171 static void check_sync_rss_stat(struct task_struct *task)
                                                   >> 172 {
                                                   >> 173         if (unlikely(task != current))
                                                   >> 174                 return;
                                                   >> 175         if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
                                                   >> 176                 sync_mm_rss(task->mm);
                                                   >> 177 }
                                                   >> 178 #else /* SPLIT_RSS_COUNTING */
                                                   >> 179 
                                                   >> 180 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
                                                   >> 181 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
                                                   >> 182 
                                                   >> 183 static void check_sync_rss_stat(struct task_struct *task)
                                                   >> 184 {
                                                   >> 185 }
                                                   >> 186 
                                                   >> 187 #endif /* SPLIT_RSS_COUNTING */
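
The split-RSS machinery above (right-hand side only; modern kernels moved to per-CPU mm counters) batches counter updates in the current task and folds them back into the mm once the per-task event count passes 64. A minimal sketch of how the helpers are driven, modelled on the 4.13-era call sites later in this file; the function name here is illustrative:

	/* Illustrative only: account one new anonymous page in a fault path. */
	static void example_account_anon_fault(struct mm_struct *mm)
	{
		inc_mm_counter_fast(mm, MM_ANONPAGES);  /* cheap per-task delta */
		check_sync_rss_stat(current);           /* folds deltas back into the
							   mm counters every 64 events */
	}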
                                                   >> 188 
                                                   >> 189 #ifdef HAVE_GENERIC_MMU_GATHER
                                                   >> 190 
                                                   >> 191 static bool tlb_next_batch(struct mmu_gather *tlb)
                                                   >> 192 {
                                                   >> 193         struct mmu_gather_batch *batch;
                                                   >> 194 
                                                   >> 195         batch = tlb->active;
                                                   >> 196         if (batch->next) {
                                                   >> 197                 tlb->active = batch->next;
                                                   >> 198                 return true;
                                                   >> 199         }
                                                   >> 200 
                                                   >> 201         if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
                                                   >> 202                 return false;
                                                   >> 203 
                                                   >> 204         batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
                                                   >> 205         if (!batch)
                                                   >> 206                 return false;
                                                   >> 207 
                                                   >> 208         tlb->batch_count++;
                                                   >> 209         batch->next = NULL;
                                                   >> 210         batch->nr   = 0;
                                                   >> 211         batch->max  = MAX_GATHER_BATCH;
                                                   >> 212 
                                                   >> 213         tlb->active->next = batch;
                                                   >> 214         tlb->active = batch;
                                                   >> 215 
                                                   >> 216         return true;
                                                   >> 217 }
                                                   >> 218 
                                                   >> 219 void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                                                   >> 220                                 unsigned long start, unsigned long end)
                                                   >> 221 {
                                                   >> 222         tlb->mm = mm;
                                                   >> 223 
                                                   >> 224         /* Is it from 0 to ~0? */
                                                   >> 225         tlb->fullmm     = !(start | (end+1));
                                                   >> 226         tlb->need_flush_all = 0;
                                                   >> 227         tlb->local.next = NULL;
                                                   >> 228         tlb->local.nr   = 0;
                                                   >> 229         tlb->local.max  = ARRAY_SIZE(tlb->__pages);
                                                   >> 230         tlb->active     = &tlb->local;
                                                   >> 231         tlb->batch_count = 0;
                                                   >> 232 
                                                   >> 233 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
                                                   >> 234         tlb->batch = NULL;
                                                   >> 235 #endif
                                                   >> 236         tlb->page_size = 0;
                                                   >> 237 
                                                   >> 238         __tlb_reset_range(tlb);
                                                   >> 239 }
                                                   >> 240 
                                                   >> 241 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
                                                   >> 242 {
                                                   >> 243         if (!tlb->end)
                                                   >> 244                 return;
                                                   >> 245 
                                                   >> 246         tlb_flush(tlb);
                                                   >> 247         mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
                                                   >> 248 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
                                                   >> 249         tlb_table_flush(tlb);
                                                   >> 250 #endif
                                                   >> 251         __tlb_reset_range(tlb);
                                                   >> 252 }
                                                   >> 253 
                                                   >> 254 static void tlb_flush_mmu_free(struct mmu_gather *tlb)
                                                   >> 255 {
                                                   >> 256         struct mmu_gather_batch *batch;
                                                   >> 257 
                                                   >> 258         for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
                                                   >> 259                 free_pages_and_swap_cache(batch->pages, batch->nr);
                                                   >> 260                 batch->nr = 0;
                                                   >> 261         }
                                                   >> 262         tlb->active = &tlb->local;
                                                   >> 263 }
179                                                   264 
 180 void mm_trace_rss_stat(struct mm_struct *mm, int member)    !! 265 void tlb_flush_mmu(struct mmu_gather *tlb)
181 {                                                 266 {
182         trace_rss_stat(mm, member);            !! 267         tlb_flush_mmu_tlbonly(tlb);
                                                   >> 268         tlb_flush_mmu_free(tlb);
                                                   >> 269 }
                                                   >> 270 
                                                   >> 271 /* tlb_finish_mmu
                                                   >> 272  *      Called at the end of the shootdown operation to free up any resources
                                                   >> 273  *      that were required.
                                                   >> 274  */
                                                   >> 275 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
                                                   >> 276                 unsigned long start, unsigned long end, bool force)
                                                   >> 277 {
                                                   >> 278         struct mmu_gather_batch *batch, *next;
                                                   >> 279 
                                                   >> 280         if (force)
                                                   >> 281                 __tlb_adjust_range(tlb, start, end - start);
                                                   >> 282 
                                                   >> 283         tlb_flush_mmu(tlb);
                                                   >> 284 
                                                   >> 285         /* keep the page table cache within bounds */
                                                   >> 286         check_pgt_cache();
                                                   >> 287 
                                                   >> 288         for (batch = tlb->local.next; batch; batch = next) {
                                                   >> 289                 next = batch->next;
                                                   >> 290                 free_pages((unsigned long)batch, 0);
                                                   >> 291         }
                                                   >> 292         tlb->local.next = NULL;
                                                   >> 293 }
                                                   >> 294 
                                                   >> 295 /* __tlb_remove_page
                                                   >> 296  *      Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
                                                   >> 297  *      handling the additional races in SMP caused by other CPUs caching valid
                                                   >> 298  *      mappings in their TLBs. Returns the number of free page slots left.
                                                   >> 299  *      When out of page slots we must call tlb_flush_mmu().
                                                   >> 300  *returns true if the caller should flush.
                                                   >> 301  */
                                                   >> 302 bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
                                                   >> 303 {
                                                   >> 304         struct mmu_gather_batch *batch;
                                                   >> 305 
                                                   >> 306         VM_BUG_ON(!tlb->end);
                                                   >> 307         VM_WARN_ON(tlb->page_size != page_size);
                                                   >> 308 
                                                   >> 309         batch = tlb->active;
                                                   >> 310         /*
                                                   >> 311          * Add the page and check if we are full. If so
                                                   >> 312          * force a flush.
                                                   >> 313          */
                                                   >> 314         batch->pages[batch->nr++] = page;
                                                   >> 315         if (batch->nr == batch->max) {
                                                   >> 316                 if (!tlb_next_batch(tlb))
                                                   >> 317                         return true;
                                                   >> 318                 batch = tlb->active;
                                                   >> 319         }
                                                   >> 320         VM_BUG_ON_PAGE(batch->nr > batch->max, page);
                                                   >> 321 
                                                   >> 322         return false;
                                                   >> 323 }
                                                   >> 324 
                                                   >> 325 #endif /* HAVE_GENERIC_MMU_GATHER */
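
The return value of __tlb_remove_page_size() encodes "batch full, flush now". A sketch of the caller-side contract (the 4.13-era asm-generic/tlb.h wraps roughly this pattern in tlb_remove_page_size(); the function name below is illustrative):

	/* Sketch: queue a page, draining the gather first when it is full. */
	static inline void example_remove_page(struct mmu_gather *tlb,
					       struct page *page)
	{
		if (__tlb_remove_page_size(tlb, page, PAGE_SIZE))
			tlb_flush_mmu(tlb);     /* drain, then batching resumes */
	}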
                                                   >> 326 
                                                   >> 327 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
                                                   >> 328 
                                                   >> 329 /*
                                                   >> 330  * See the comment near struct mmu_table_batch.
                                                   >> 331  */
                                                   >> 332 
                                                   >> 333 static void tlb_remove_table_smp_sync(void *arg)
                                                   >> 334 {
                                                   >> 335         /* Simply deliver the interrupt */
                                                   >> 336 }
                                                   >> 337 
                                                   >> 338 static void tlb_remove_table_one(void *table)
                                                   >> 339 {
                                                   >> 340         /*
                                                   >> 341          * This isn't an RCU grace period and hence the page-tables cannot be
                                                   >> 342          * assumed to be actually RCU-freed.
                                                   >> 343          *
                                                   >> 344          * It is however sufficient for software page-table walkers that rely on
                                                   >> 345          * IRQ disabling. See the comment near struct mmu_table_batch.
                                                   >> 346          */
                                                   >> 347         smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
                                                   >> 348         __tlb_remove_table(table);
                                                   >> 349 }
                                                   >> 350 
                                                   >> 351 static void tlb_remove_table_rcu(struct rcu_head *head)
                                                   >> 352 {
                                                   >> 353         struct mmu_table_batch *batch;
                                                   >> 354         int i;
                                                   >> 355 
                                                   >> 356         batch = container_of(head, struct mmu_table_batch, rcu);
                                                   >> 357 
                                                   >> 358         for (i = 0; i < batch->nr; i++)
                                                   >> 359                 __tlb_remove_table(batch->tables[i]);
                                                   >> 360 
                                                   >> 361         free_page((unsigned long)batch);
                                                   >> 362 }
                                                   >> 363 
                                                   >> 364 void tlb_table_flush(struct mmu_gather *tlb)
                                                   >> 365 {
                                                   >> 366         struct mmu_table_batch **batch = &tlb->batch;
                                                   >> 367 
                                                   >> 368         if (*batch) {
                                                   >> 369                 call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
                                                   >> 370                 *batch = NULL;
                                                   >> 371         }
                                                   >> 372 }
                                                   >> 373 
                                                   >> 374 void tlb_remove_table(struct mmu_gather *tlb, void *table)
                                                   >> 375 {
                                                   >> 376         struct mmu_table_batch **batch = &tlb->batch;
                                                   >> 377 
                                                   >> 378         /*
                                                   >> 379          * When there's less then two users of this mm there cannot be a
                                                   >> 380          * concurrent page-table walk.
                                                   >> 381          */
                                                   >> 382         if (atomic_read(&tlb->mm->mm_users) < 2) {
                                                   >> 383                 __tlb_remove_table(table);
                                                   >> 384                 return;
                                                   >> 385         }
                                                   >> 386 
                                                   >> 387         if (*batch == NULL) {
                                                   >> 388                 *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                                                   >> 389                 if (*batch == NULL) {
                                                   >> 390                         tlb_remove_table_one(table);
                                                   >> 391                         return;
                                                   >> 392                 }
                                                   >> 393                 (*batch)->nr = 0;
                                                   >> 394         }
                                                   >> 395         (*batch)->tables[(*batch)->nr++] = table;
                                                   >> 396         if ((*batch)->nr == MAX_TABLE_BATCH)
                                                   >> 397                 tlb_table_flush(tlb);
                                                   >> 398 }
                                                   >> 399 
                                                   >> 400 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
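
Architectures opting into CONFIG_HAVE_RCU_TABLE_FREE supply __tlb_remove_table() to do the final free once no software walker can still see the table. A hypothetical minimal implementation, purely to show the contract (the hook name is real, the body varies per architecture; free_page_and_swap_cache() is the same helper used for page batches above):

	/* Hypothetical arch hook: last reference gone, free the table page. */
	static void __tlb_remove_table(void *table)
	{
		free_page_and_swap_cache((struct page *)table);
	}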
                                                   >> 401 
                                                   >> 402 /* tlb_gather_mmu
                                                   >> 403  *      Called to initialize an (on-stack) mmu_gather structure for page-table
                                                   >> 404  *      tear-down from @mm. The @fullmm argument is used when @mm is without
                                                   >> 405  *      users and we're going to destroy the full address space (exit/execve).
                                                   >> 406  */
                                                   >> 407 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                                                   >> 408                         unsigned long start, unsigned long end)
                                                   >> 409 {
                                                   >> 410         arch_tlb_gather_mmu(tlb, mm, start, end);
                                                   >> 411         inc_tlb_flush_pending(tlb->mm);
                                                   >> 412 }
                                                   >> 413 
                                                   >> 414 void tlb_finish_mmu(struct mmu_gather *tlb,
                                                   >> 415                 unsigned long start, unsigned long end)
                                                   >> 416 {
                                                   >> 417         /*
                                                   >> 418          * If there are parallel threads are doing PTE changes on same range
                                                   >> 419          * under non-exclusive lock(e.g., mmap_sem read-side) but defer TLB
                                                   >> 420          * flush by batching, a thread has stable TLB entry can fail to flush
                                                   >> 421          * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
                                                   >> 422          * forcefully if we detect parallel PTE batching threads.
                                                   >> 423          */
                                                   >> 424         bool force = mm_tlb_flush_nested(tlb->mm);
                                                   >> 425 
                                                   >> 426         arch_tlb_finish_mmu(tlb, start, end, force);
                                                   >> 427         dec_tlb_flush_pending(tlb->mm);
183 }                                                 428 }
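
Taken together, the gather API brackets every teardown. A sketch of the lifecycle as callers such as unmap_region() and exit_mmap() drive it, using the 4.13-era signatures shown on the right (6.12's tlb_gather_mmu() no longer takes a range; the function name below is illustrative):

	/* Sketch: batch, flush, then free pages for an address range. */
	static void example_teardown(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
	{
		struct mmu_gather tlb;

		tlb_gather_mmu(&tlb, mm, start, end);   /* marks a TLB flush pending */
		/* ... unmap_vmas()/free_pgtables() feed pages into tlb ... */
		tlb_finish_mmu(&tlb, start, end);       /* flush TLBs, then free */
	}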
184                                                   429 
185 /*                                                430 /*
186  * Note: this doesn't free the actual pages th    431  * Note: this doesn't free the actual pages themselves. That
187  * has been handled earlier when unmapping all    432  * has been handled earlier when unmapping all the memory regions.
188  */                                               433  */
189 static void free_pte_range(struct mmu_gather *    434 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
190                            unsigned long addr)    435                            unsigned long addr)
191 {                                                 436 {
192         pgtable_t token = pmd_pgtable(*pmd);      437         pgtable_t token = pmd_pgtable(*pmd);
193         pmd_clear(pmd);                           438         pmd_clear(pmd);
194         pte_free_tlb(tlb, token, addr);           439         pte_free_tlb(tlb, token, addr);
195         mm_dec_nr_ptes(tlb->mm);               !! 440         atomic_long_dec(&tlb->mm->nr_ptes);
196 }                                                 441 }
197                                                   442 
198 static inline void free_pmd_range(struct mmu_g    443 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
199                                 unsigned long     444                                 unsigned long addr, unsigned long end,
200                                 unsigned long     445                                 unsigned long floor, unsigned long ceiling)
201 {                                                 446 {
202         pmd_t *pmd;                               447         pmd_t *pmd;
203         unsigned long next;                       448         unsigned long next;
204         unsigned long start;                      449         unsigned long start;
205                                                   450 
206         start = addr;                             451         start = addr;
207         pmd = pmd_offset(pud, addr);              452         pmd = pmd_offset(pud, addr);
208         do {                                      453         do {
209                 next = pmd_addr_end(addr, end)    454                 next = pmd_addr_end(addr, end);
210                 if (pmd_none_or_clear_bad(pmd)    455                 if (pmd_none_or_clear_bad(pmd))
211                         continue;                 456                         continue;
212                 free_pte_range(tlb, pmd, addr)    457                 free_pte_range(tlb, pmd, addr);
213         } while (pmd++, addr = next, addr != e    458         } while (pmd++, addr = next, addr != end);
214                                                   459 
215         start &= PUD_MASK;                        460         start &= PUD_MASK;
216         if (start < floor)                        461         if (start < floor)
217                 return;                           462                 return;
218         if (ceiling) {                            463         if (ceiling) {
219                 ceiling &= PUD_MASK;              464                 ceiling &= PUD_MASK;
220                 if (!ceiling)                     465                 if (!ceiling)
221                         return;                   466                         return;
222         }                                         467         }
223         if (end - 1 > ceiling - 1)                468         if (end - 1 > ceiling - 1)
224                 return;                           469                 return;
225                                                   470 
226         pmd = pmd_offset(pud, start);             471         pmd = pmd_offset(pud, start);
227         pud_clear(pud);                           472         pud_clear(pud);
228         pmd_free_tlb(tlb, pmd, start);            473         pmd_free_tlb(tlb, pmd, start);
229         mm_dec_nr_pmds(tlb->mm);                  474         mm_dec_nr_pmds(tlb->mm);
230 }                                                 475 }
231                                                   476 
232 static inline void free_pud_range(struct mmu_g    477 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
233                                 unsigned long     478                                 unsigned long addr, unsigned long end,
234                                 unsigned long     479                                 unsigned long floor, unsigned long ceiling)
235 {                                                 480 {
236         pud_t *pud;                               481         pud_t *pud;
237         unsigned long next;                       482         unsigned long next;
238         unsigned long start;                      483         unsigned long start;
239                                                   484 
240         start = addr;                             485         start = addr;
241         pud = pud_offset(p4d, addr);              486         pud = pud_offset(p4d, addr);
242         do {                                      487         do {
243                 next = pud_addr_end(addr, end)    488                 next = pud_addr_end(addr, end);
244                 if (pud_none_or_clear_bad(pud)    489                 if (pud_none_or_clear_bad(pud))
245                         continue;                 490                         continue;
246                 free_pmd_range(tlb, pud, addr,    491                 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
247         } while (pud++, addr = next, addr != e    492         } while (pud++, addr = next, addr != end);
248                                                   493 
249         start &= P4D_MASK;                        494         start &= P4D_MASK;
250         if (start < floor)                        495         if (start < floor)
251                 return;                           496                 return;
252         if (ceiling) {                            497         if (ceiling) {
253                 ceiling &= P4D_MASK;              498                 ceiling &= P4D_MASK;
254                 if (!ceiling)                     499                 if (!ceiling)
255                         return;                   500                         return;
256         }                                         501         }
257         if (end - 1 > ceiling - 1)                502         if (end - 1 > ceiling - 1)
258                 return;                           503                 return;
259                                                   504 
260         pud = pud_offset(p4d, start);             505         pud = pud_offset(p4d, start);
261         p4d_clear(p4d);                           506         p4d_clear(p4d);
262         pud_free_tlb(tlb, pud, start);            507         pud_free_tlb(tlb, pud, start);
263         mm_dec_nr_puds(tlb->mm);               << 
264 }                                                 508 }
265                                                   509 
266 static inline void free_p4d_range(struct mmu_g    510 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
267                                 unsigned long     511                                 unsigned long addr, unsigned long end,
268                                 unsigned long     512                                 unsigned long floor, unsigned long ceiling)
269 {                                                 513 {
270         p4d_t *p4d;                               514         p4d_t *p4d;
271         unsigned long next;                       515         unsigned long next;
272         unsigned long start;                      516         unsigned long start;
273                                                   517 
274         start = addr;                             518         start = addr;
275         p4d = p4d_offset(pgd, addr);              519         p4d = p4d_offset(pgd, addr);
276         do {                                      520         do {
277                 next = p4d_addr_end(addr, end)    521                 next = p4d_addr_end(addr, end);
278                 if (p4d_none_or_clear_bad(p4d)    522                 if (p4d_none_or_clear_bad(p4d))
279                         continue;                 523                         continue;
280                 free_pud_range(tlb, p4d, addr,    524                 free_pud_range(tlb, p4d, addr, next, floor, ceiling);
281         } while (p4d++, addr = next, addr != e    525         } while (p4d++, addr = next, addr != end);
282                                                   526 
283         start &= PGDIR_MASK;                      527         start &= PGDIR_MASK;
284         if (start < floor)                        528         if (start < floor)
285                 return;                           529                 return;
286         if (ceiling) {                            530         if (ceiling) {
287                 ceiling &= PGDIR_MASK;            531                 ceiling &= PGDIR_MASK;
288                 if (!ceiling)                     532                 if (!ceiling)
289                         return;                   533                         return;
290         }                                         534         }
291         if (end - 1 > ceiling - 1)                535         if (end - 1 > ceiling - 1)
292                 return;                           536                 return;
293                                                   537 
294         p4d = p4d_offset(pgd, start);             538         p4d = p4d_offset(pgd, start);
295         pgd_clear(pgd);                           539         pgd_clear(pgd);
296         p4d_free_tlb(tlb, p4d, start);            540         p4d_free_tlb(tlb, p4d, start);
297 }                                                 541 }
298                                                   542 
299 /*                                                543 /*
300  * This function frees user-level page tables     544  * This function frees user-level page tables of a process.
301  */                                               545  */
302 void free_pgd_range(struct mmu_gather *tlb,       546 void free_pgd_range(struct mmu_gather *tlb,
303                         unsigned long addr, un    547                         unsigned long addr, unsigned long end,
304                         unsigned long floor, u    548                         unsigned long floor, unsigned long ceiling)
305 {                                                 549 {
306         pgd_t *pgd;                               550         pgd_t *pgd;
307         unsigned long next;                       551         unsigned long next;
308                                                   552 
309         /*                                        553         /*
310          * The next few lines have given us lo    554          * The next few lines have given us lots of grief...
311          *                                        555          *
312          * Why are we testing PMD* at this top    556          * Why are we testing PMD* at this top level?  Because often
313          * there will be no work to do at all,    557          * there will be no work to do at all, and we'd prefer not to
314          * go all the way down to the bottom j    558          * go all the way down to the bottom just to discover that.
315          *                                        559          *
316          * Why all these "- 1"s?  Because 0 re    560          * Why all these "- 1"s?  Because 0 represents both the bottom
317          * of the address space and the top of    561          * of the address space and the top of it (using -1 for the
318          * top wouldn't help much: the masks w    562          * top wouldn't help much: the masks would do the wrong thing).
319          * The rule is that addr 0 and floor 0    563          * The rule is that addr 0 and floor 0 refer to the bottom of
320          * the address space, but end 0 and ce    564          * the address space, but end 0 and ceiling 0 refer to the top
321          * Comparisons need to use "end - 1" a    565          * Comparisons need to use "end - 1" and "ceiling - 1" (though
322          * that end 0 case should be mythical)    566          * that end 0 case should be mythical).
323          *                                        567          *
324          * Wherever addr is brought up or ceil    568          * Wherever addr is brought up or ceiling brought down, we must
325          * be careful to reject "the opposite     569          * be careful to reject "the opposite 0" before it confuses the
326          * subsequent tests.  But what about w    570          * subsequent tests.  But what about where end is brought down
327          * by PMD_SIZE below? no, end can't go    571          * by PMD_SIZE below? no, end can't go down to 0 there.
328          *                                        572          *
329          * Whereas we round start (addr) and c    573          * Whereas we round start (addr) and ceiling down, by different
330          * masks at different levels, in order    574          * masks at different levels, in order to test whether a table
331          * now has no other vmas using it, so     575          * now has no other vmas using it, so can be freed, we don't
332          * bother to round floor or end up - t    576          * bother to round floor or end up - the tests don't need that.
333          */                                       577          */
334                                                   578 
335         addr &= PMD_MASK;                         579         addr &= PMD_MASK;
336         if (addr < floor) {                       580         if (addr < floor) {
337                 addr += PMD_SIZE;                 581                 addr += PMD_SIZE;
338                 if (!addr)                        582                 if (!addr)
339                         return;                   583                         return;
340         }                                         584         }
341         if (ceiling) {                            585         if (ceiling) {
342                 ceiling &= PMD_MASK;              586                 ceiling &= PMD_MASK;
343                 if (!ceiling)                     587                 if (!ceiling)
344                         return;                   588                         return;
345         }                                         589         }
346         if (end - 1 > ceiling - 1)                590         if (end - 1 > ceiling - 1)
347                 end -= PMD_SIZE;                  591                 end -= PMD_SIZE;
348         if (addr > end - 1)                       592         if (addr > end - 1)
349                 return;                           593                 return;
350         /*                                        594         /*
351          * We add page table cache pages with     595          * We add page table cache pages with PAGE_SIZE,
352          * (see pte_free_tlb()), flush the tlb    596          * (see pte_free_tlb()), flush the tlb if we need
353          */                                       597          */
354         tlb_change_page_size(tlb, PAGE_SIZE);  !! 598         tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
355         pgd = pgd_offset(tlb->mm, addr);          599         pgd = pgd_offset(tlb->mm, addr);
356         do {                                      600         do {
357                 next = pgd_addr_end(addr, end)    601                 next = pgd_addr_end(addr, end);
358                 if (pgd_none_or_clear_bad(pgd)    602                 if (pgd_none_or_clear_bad(pgd))
359                         continue;                 603                         continue;
360                 free_p4d_range(tlb, pgd, addr,    604                 free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
361         } while (pgd++, addr = next, addr != e    605         } while (pgd++, addr = next, addr != end);
362 }                                                 606 }
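
The PMD_MASK rounding and the "opposite 0" guards above are easy to get wrong, so here is a minimal userspace sketch of the same checks (an illustration, not kernel code: the MODEL_* constants and range_survives() helper are hypothetical stand-ins assuming a 2 MiB PMD span).

/* illustration only, not kernel code */
#include <stdio.h>

#define MODEL_PMD_SIZE (1UL << 21)            /* assumed 2 MiB PMD span */
#define MODEL_PMD_MASK (~(MODEL_PMD_SIZE - 1))

static int range_survives(unsigned long addr, unsigned long end,
                          unsigned long floor, unsigned long ceiling)
{
	addr &= MODEL_PMD_MASK;
	if (addr < floor) {
		addr += MODEL_PMD_SIZE;
		if (!addr)              /* wrapped past the top of the address space */
			return 0;
	}
	if (ceiling) {
		ceiling &= MODEL_PMD_MASK;
		if (!ceiling)           /* rounding brought ceiling down to 0 */
			return 0;
	}
	if (end - 1 > ceiling - 1)      /* compare last bytes so end == 0 survives */
		end -= MODEL_PMD_SIZE;
	if (addr > end - 1)
		return 0;
	return 1;
}

int main(void)
{
	/* ceiling == 0 means "no ceiling": the end-1 trick keeps the test sane */
	printf("%d\n", range_survives(0x200000, 0x400000, 0, 0));
	return 0;
}
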
363                                                   607 
364 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,  !! 608 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
365                    struct vm_area_struct *vma, unsigned long floor,  !! 609                 unsigned long floor, unsigned long ceiling)
366                    unsigned long ceiling, bool mm_wr_locked)  <<
367 {                                                 610 {
368         struct unlink_vma_file_batch vb;       !! 611         while (vma) {
369                                                !! 612                 struct vm_area_struct *next = vma->vm_next;
370         do {                                   << 
371                 unsigned long addr = vma->vm_s    613                 unsigned long addr = vma->vm_start;
372                 struct vm_area_struct *next;   << 
373                                                << 
374                 /*                             << 
375                  * Note: USER_PGTABLES_CEILING may be passed as ~0UL, and may  <<
376                  * be 0.  This will underflow and is okay.  <<
377                  */                            << 
378                 next = mas_find(mas, ceiling - 1);  <<
379                 if (unlikely(xa_is_zero(next)))  <<
380                         next = NULL;           << 
381                                                   614 
382                 /*                                615                 /*
383                  * Hide vma from rmap and trun    616                  * Hide vma from rmap and truncate_pagecache before freeing
384                  * pgtables                       617                  * pgtables
385                  */                               618                  */
386                 if (mm_wr_locked)              << 
387                         vma_start_write(vma);  << 
388                 unlink_anon_vmas(vma);            619                 unlink_anon_vmas(vma);
                                                   >> 620                 unlink_file_vma(vma);
389                                                   621 
390                 if (is_vm_hugetlb_page(vma)) {    622                 if (is_vm_hugetlb_page(vma)) {
391                         unlink_file_vma(vma);  << 
392                         hugetlb_free_pgd_range    623                         hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
393                                 floor, next ?     624                                 floor, next ? next->vm_start : ceiling);
394                 } else {                          625                 } else {
395                         unlink_file_vma_batch_init(&vb);  <<
396                         unlink_file_vma_batch_add(&vb, vma);  <<
397                                                << 
398                         /*                        626                         /*
399                          * Optimization: gathe    627                          * Optimization: gather nearby vmas into one call down
400                          */                       628                          */
401                         while (next && next->v    629                         while (next && next->vm_start <= vma->vm_end + PMD_SIZE
402                                && !is_vm_huget    630                                && !is_vm_hugetlb_page(next)) {
403                                 vma = next;       631                                 vma = next;
404                                 next = mas_find(mas, ceiling - 1);  !! 632                                 next = vma->vm_next;
405                                 if (unlikely(xa_is_zero(next)))  <<
406                                         next = NULL;  <<
407                                 if (mm_wr_locked)  <<
408                                         vma_start_write(vma);  <<
409                                 unlink_anon_vm    633                                 unlink_anon_vmas(vma);
410                                 unlink_file_vma_batch_add(&vb, vma);  !! 634                                 unlink_file_vma(vma);
411                         }                         635                         }
412                         unlink_file_vma_batch_final(&vb);  <<
413                         free_pgd_range(tlb, ad    636                         free_pgd_range(tlb, addr, vma->vm_end,
414                                 floor, next ?     637                                 floor, next ? next->vm_start : ceiling);
415                 }                                 638                 }
416                 vma = next;                       639                 vma = next;
417         } while (vma);                         << 
418 }                                              << 
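
free_pgtables() gathers neighbouring vmas so one free_pgd_range() call covers several of them. Below is a toy model of that gathering loop, under the assumption that ranges are sorted and a gap of at most one PMD span may be swallowed (all names are invented for illustration).

/* illustration only, not kernel code */
#include <stdio.h>

struct range { unsigned long start, end; };

#define MODEL_PMD_SIZE (1UL << 21)	/* assumed 2 MiB, as in the sketch above */

static void flush_range(unsigned long start, unsigned long end)
{
	printf("free [%#lx, %#lx)\n", start, end);
}

static void free_ranges(const struct range *r, int n)
{
	int i = 0;

	while (i < n) {
		unsigned long start = r[i].start;
		unsigned long end = r[i].end;

		/* swallow neighbours that start within one PMD of our end */
		while (i + 1 < n && r[i + 1].start <= end + MODEL_PMD_SIZE) {
			end = r[i + 1].end;
			i++;
		}
		flush_range(start, end);
		i++;
	}
}

int main(void)
{
	struct range r[] = {
		{ 0x100000, 0x200000 },
		{ 0x280000, 0x300000 },		/* close: merged with the first */
		{ 0x40000000, 0x40100000 },	/* far: gets its own call */
	};
	free_ranges(r, 3);
	return 0;
}
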
419                                                << 
420 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)  <<
421 {  <<
422         spinlock_t *ptl = pmd_lock(mm, pmd);  <<
423   <<
424         if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */  <<
425                 mm_inc_nr_ptes(mm);  <<
426                 /*  <<
427                  * Ensure all pte setup (eg. pte page lock and page clearing) are  <<
428                  * visible before the pte is made visible to other CPUs by being  <<
429                  * put into page tables.  <<
430                  *  <<
431                  * The other side of the story is the pointer chasing in the page  <<
432                  * table walking code (when walking the page table without locking;  <<
433                  * ie. most of the time). Fortunately, these data accesses consist  <<
434                  * of a chain of data-dependent loads, meaning most CPUs (alpha  <<
435                  * being the notable exception) will already guarantee loads are  <<
436                  * seen in-order. See the alpha page table accessors for the  <<
437                  * smp_rmb() barriers in page table walking code.  <<
438                  */  <<
439                 smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */  <<
440                 pmd_populate(mm, pmd, *pte);  <<
441                 *pte = NULL;  <<
442         }                                         640         }
443         spin_unlock(ptl);                      << 
444 }                                                 641 }
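
pmd_install()'s smp_wmb() pairs with the dependent loads performed by lock-free page-table walkers. Below is a rough C11-atomics model of the same publish/consume pattern (illustration only; the reader uses an acquire load, since C11 consume is in practice promoted to acquire).

/* illustration only, not kernel code */
#include <stdatomic.h>
#include <stdlib.h>

struct table { int slots[512]; };

static _Atomic(struct table *) published;

static void publisher(void)
{
	struct table *t = calloc(1, sizeof(*t));	/* models pte page clearing */
	if (!t)
		return;
	t->slots[0] = 42;				/* models pte setup */
	/* release: make the prior stores visible before the pointer itself */
	atomic_store_explicit(&published, t, memory_order_release);
}

static int reader(void)
{
	/* readers dereference what they loaded: a data-dependent load chain */
	struct table *t = atomic_load_explicit(&published, memory_order_acquire);
	return t ? t->slots[0] : -1;			/* never sees junk */
}

int main(void)
{
	publisher();
	return reader() == 42 ? 0 : 1;
}
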
445                                                   642 
446 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)  !! 643 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
447 {                                                 644 {
448         pgtable_t new = pte_alloc_one(mm);     !! 645         spinlock_t *ptl;
                                                   >> 646         pgtable_t new = pte_alloc_one(mm, address);
449         if (!new)                                 647         if (!new)
450                 return -ENOMEM;                   648                 return -ENOMEM;
451                                                   649 
452         pmd_install(mm, pmd, &new);            !! 650         /*
                                                   >> 651          * Ensure all pte setup (eg. pte page lock and page clearing) are
                                                   >> 652          * visible before the pte is made visible to other CPUs by being
                                                   >> 653          * put into page tables.
                                                   >> 654          *
                                                   >> 655          * The other side of the story is the pointer chasing in the page
                                                   >> 656          * table walking code (when walking the page table without locking;
                                                   >> 657          * ie. most of the time). Fortunately, these data accesses consist
                                                   >> 658          * of a chain of data-dependent loads, meaning most CPUs (alpha
                                                   >> 659          * being the notable exception) will already guarantee loads are
                                                   >> 660          * seen in-order. See the alpha page table accessors for the
                                                   >> 661          * smp_read_barrier_depends() barriers in page table walking code.
                                                   >> 662          */
                                                   >> 663         smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
                                                   >> 664 
                                                   >> 665         ptl = pmd_lock(mm, pmd);
                                                   >> 666         if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
                                                   >> 667                 atomic_long_inc(&mm->nr_ptes);
                                                   >> 668                 pmd_populate(mm, pmd, new);
                                                   >> 669                 new = NULL;
                                                   >> 670         }
                                                   >> 671         spin_unlock(ptl);
453         if (new)                                  672         if (new)
454                 pte_free(mm, new);                673                 pte_free(mm, new);
455         return 0;                                 674         return 0;
456 }                                                 675 }
457                                                   676 
458 int __pte_alloc_kernel(pmd_t *pmd)             !! 677 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
459 {                                                 678 {
460         pte_t *new = pte_alloc_one_kernel(&init_mm);  !! 679         pte_t *new = pte_alloc_one_kernel(&init_mm, address);
461         if (!new)                                 680         if (!new)
462                 return -ENOMEM;                   681                 return -ENOMEM;
463                                                   682 
                                                   >> 683         smp_wmb(); /* See comment in __pte_alloc */
                                                   >> 684 
464         spin_lock(&init_mm.page_table_lock);      685         spin_lock(&init_mm.page_table_lock);
465         if (likely(pmd_none(*pmd))) {   /* Has    686         if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
466                 smp_wmb(); /* See comment in pmd_install() */  <<
467                 pmd_populate_kernel(&init_mm,     687                 pmd_populate_kernel(&init_mm, pmd, new);
468                 new = NULL;                       688                 new = NULL;
469         }                                         689         }
470         spin_unlock(&init_mm.page_table_lock);    690         spin_unlock(&init_mm.page_table_lock);
471         if (new)                                  691         if (new)
472                 pte_free_kernel(&init_mm, new)    692                 pte_free_kernel(&init_mm, new);
473         return 0;                                 693         return 0;
474 }                                                 694 }
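
Both __pte_alloc() variants follow the same shape: allocate while unlocked (allocation may sleep), re-check under the lock, and free the allocation if another thread won the race. A compact pthread model of that pattern (hypothetical names, not kernel API):

/* illustration only, not kernel code; link with -lpthread */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *slot;		/* models the pmd entry */

static int install_once(void)
{
	int *new = malloc(sizeof(*new));	/* may sleep: do it unlocked */
	if (!new)
		return -1;
	*new = 0;

	pthread_mutex_lock(&lock);
	if (!slot) {		/* has another thread populated it? */
		slot = new;
		new = NULL;
	}
	pthread_mutex_unlock(&lock);

	free(new);		/* no-op for the winner, cleanup for losers */
	return 0;
}

int main(void)
{
	return install_once();
}
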
475                                                   695 
476 static inline void init_rss_vec(int *rss)         696 static inline void init_rss_vec(int *rss)
477 {                                                 697 {
478         memset(rss, 0, sizeof(int) * NR_MM_COU    698         memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
479 }                                                 699 }
480                                                   700 
481 static inline void add_mm_rss_vec(struct mm_st    701 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
482 {                                                 702 {
483         int i;                                    703         int i;
484                                                   704 
                                                   >> 705         if (current->mm == mm)
                                                   >> 706                 sync_mm_rss(mm);
485         for (i = 0; i < NR_MM_COUNTERS; i++)      707         for (i = 0; i < NR_MM_COUNTERS; i++)
486                 if (rss[i])                       708                 if (rss[i])
487                         add_mm_counter(mm, i,     709                         add_mm_counter(mm, i, rss[i]);
488 }                                                 710 }
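
add_mm_rss_vec() exists so callers can accumulate counter deltas in a local rss[] vector and publish them once, turning many contended updates into at most one per counter type. A two-counter sketch of why that helps (the MODEL_* enum is invented for illustration):

/* illustration only, not kernel code */
#include <stdatomic.h>

enum { MODEL_ANON, MODEL_FILE, MODEL_NR_COUNTERS };	/* assumed counter set */

static _Atomic long shared_counter[MODEL_NR_COUNTERS];

static void scan(const int *types, int n)
{
	long rss[MODEL_NR_COUNTERS] = { 0 };
	int i;

	for (i = 0; i < n; i++)
		rss[types[i]]++;			/* cheap, thread-local */

	for (i = 0; i < MODEL_NR_COUNTERS; i++)
		if (rss[i])				/* one atomic per type */
			atomic_fetch_add(&shared_counter[i], rss[i]);
}

int main(void)
{
	int types[] = { MODEL_ANON, MODEL_ANON, MODEL_FILE };
	scan(types, 3);
	return 0;
}
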
489                                                   711 
490 /*                                                712 /*
491  * This function is called to print an error w    713  * This function is called to print an error when a bad pte
492  * is found. For example, we might have a PFN-    714  * is found. For example, we might have a PFN-mapped pte in
493  * a region that doesn't allow it.                715  * a region that doesn't allow it.
494  *                                                716  *
495  * The calling function must still handle the     717  * The calling function must still handle the error.
496  */                                               718  */
497 static void print_bad_pte(struct vm_area_struc    719 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
498                           pte_t pte, struct pa    720                           pte_t pte, struct page *page)
499 {                                                 721 {
500         pgd_t *pgd = pgd_offset(vma->vm_mm, ad    722         pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
501         p4d_t *p4d = p4d_offset(pgd, addr);       723         p4d_t *p4d = p4d_offset(pgd, addr);
502         pud_t *pud = pud_offset(p4d, addr);       724         pud_t *pud = pud_offset(p4d, addr);
503         pmd_t *pmd = pmd_offset(pud, addr);       725         pmd_t *pmd = pmd_offset(pud, addr);
504         struct address_space *mapping;            726         struct address_space *mapping;
505         pgoff_t index;                            727         pgoff_t index;
506         static unsigned long resume;              728         static unsigned long resume;
507         static unsigned long nr_shown;            729         static unsigned long nr_shown;
508         static unsigned long nr_unshown;          730         static unsigned long nr_unshown;
509                                                   731 
510         /*                                        732         /*
511          * Allow a burst of 60 reports, then k    733          * Allow a burst of 60 reports, then keep quiet for that minute;
512          * or allow a steady drip of one repor    734          * or allow a steady drip of one report per second.
513          */                                       735          */
514         if (nr_shown == 60) {                     736         if (nr_shown == 60) {
515                 if (time_before(jiffies, resum    737                 if (time_before(jiffies, resume)) {
516                         nr_unshown++;             738                         nr_unshown++;
517                         return;                   739                         return;
518                 }                                 740                 }
519                 if (nr_unshown) {                 741                 if (nr_unshown) {
520                         pr_alert("BUG: Bad pag    742                         pr_alert("BUG: Bad page map: %lu messages suppressed\n",
521                                  nr_unshown);     743                                  nr_unshown);
522                         nr_unshown = 0;           744                         nr_unshown = 0;
523                 }                                 745                 }
524                 nr_shown = 0;                     746                 nr_shown = 0;
525         }                                         747         }
526         if (nr_shown++ == 0)                      748         if (nr_shown++ == 0)
527                 resume = jiffies + 60 * HZ;       749                 resume = jiffies + 60 * HZ;
528                                                   750 
529         mapping = vma->vm_file ? vma->vm_file-    751         mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
530         index = linear_page_index(vma, addr);     752         index = linear_page_index(vma, addr);
531                                                   753 
532         pr_alert("BUG: Bad page map in process    754         pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
533                  current->comm,                   755                  current->comm,
534                  (long long)pte_val(pte), (lon    756                  (long long)pte_val(pte), (long long)pmd_val(*pmd));
535         if (page)                                 757         if (page)
536                 dump_page(page, "bad pte");       758                 dump_page(page, "bad pte");
537         pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",  !! 759         pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
538                  (void *)addr, vma->vm_flags,     760                  (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
539         pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",  !! 761         /*
                                                   >> 762          * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
                                                   >> 763          */
                                                   >> 764         pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n",
540                  vma->vm_file,                    765                  vma->vm_file,
541                  vma->vm_ops ? vma->vm_ops->fa    766                  vma->vm_ops ? vma->vm_ops->fault : NULL,
542                  vma->vm_file ? vma->vm_file->    767                  vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
543                  mapping ? mapping->a_ops->read_folio : NULL);  !! 768                  mapping ? mapping->a_ops->readpage : NULL);
544         dump_stack();                             769         dump_stack();
545         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_    770         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
546 }                                                 771 }
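
The throttling above allows a burst of 60 reports, then stays quiet for the remainder of that minute while counting what was suppressed. The same state machine rebuilt on time() for illustration (not kernel code; second granularity stands in for jiffies):

/* illustration only, not kernel code */
#include <stdio.h>
#include <time.h>

static int report_allowed(void)
{
	static time_t resume;
	static unsigned long nr_shown, nr_unshown;
	time_t now = time(NULL);

	if (nr_shown == 60) {
		if (now < resume) {
			nr_unshown++;		/* suppressed, but remembered */
			return 0;
		}
		if (nr_unshown) {
			printf("%lu messages suppressed\n", nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;			/* start a fresh burst */
	}
	if (nr_shown++ == 0)
		resume = now + 60;		/* burst window opens now */
	return 1;
}

int main(void)
{
	int i, shown = 0;

	for (i = 0; i < 100; i++)
		shown += report_allowed();
	printf("%d of 100 shown\n", shown);	/* 60, within one minute */
	return 0;
}
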
547                                                   772 
548 /*                                                773 /*
549  * vm_normal_page -- This function gets the "s    774  * vm_normal_page -- This function gets the "struct page" associated with a pte.
550  *                                                775  *
551  * "Special" mappings do not wish to be associ    776  * "Special" mappings do not wish to be associated with a "struct page" (either
552  * it doesn't exist, or it exists but they don    777  * it doesn't exist, or it exists but they don't want to touch it). In this
553  * case, NULL is returned here. "Normal" mappi    778  * case, NULL is returned here. "Normal" mappings do have a struct page.
554  *                                                779  *
555  * There are 2 broad cases. Firstly, an archit    780  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
556  * pte bit, in which case this function is tri    781  * pte bit, in which case this function is trivial. Secondly, an architecture
557  * may not have a spare pte bit, which require    782  * may not have a spare pte bit, which requires a more complicated scheme,
558  * described below.                               783  * described below.
559  *                                                784  *
560  * A raw VM_PFNMAP mapping (ie. one that is no    785  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
561  * special mapping (even if there are underlyi    786  * special mapping (even if there are underlying and valid "struct pages").
562  * COWed pages of a VM_PFNMAP are always norma    787  * COWed pages of a VM_PFNMAP are always normal.
563  *                                                788  *
564  * The way we recognize COWed pages within VM_    789  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
565  * rules set up by "remap_pfn_range()": the vm    790  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
566  * set, and the vm_pgoff will point to the fir    791  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
567  * mapping will always honor the rule             792  * mapping will always honor the rule
568  *                                                793  *
569  *      pfn_of_page == vma->vm_pgoff + ((addr     794  *      pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
570  *                                                795  *
571  * And for normal mappings this is false.         796  * And for normal mappings this is false.
572  *                                                797  *
573  * This restricts such mappings to be a linear    798  * This restricts such mappings to be a linear translation from virtual address
574  * to pfn. To get around this restriction, we     799  * to pfn. To get around this restriction, we allow arbitrary mappings so long
575  * as the vma is not a COW mapping; in that ca    800  * as the vma is not a COW mapping; in that case, we know that all ptes are
576  * special (because none can have been COWed).    801  * special (because none can have been COWed).
577  *                                                802  *
578  *                                                803  *
579  * In order to support COW of arbitrary specia    804  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
580  *                                                805  *
581  * VM_MIXEDMAP mappings can likewise contain m    806  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
582  * page" backing, however the difference is th    807  * page" backing, however the difference is that _all_ pages with a struct
583  * page (that is, those where pfn_valid is tru    808  * page (that is, those where pfn_valid is true) are refcounted and considered
584  * normal pages by the VM. The only exception are zeropages, which are  !! 809  * normal pages by the VM. The disadvantage is that pages are refcounted
585  * *never* refcounted.                         !! 810  * (which can be slower and simply not an option for some PFNMAP users). The
586  *                                             !! 811  * advantage is that we don't have to follow the strict linearity rule of
587  * The disadvantage is that pages are refcounted (which can be slower and  !! 812  * PFNMAP mappings in order to support COWable mappings.
588  * simply not an option for some PFNMAP users). The advantage is that we  <<
589  * don't have to follow the strict linearity rule of PFNMAP mappings in  <<
590  * order to support COWable mappings.          << 
591  *                                                813  *
592  */                                               814  */
                                                   >> 815 #ifdef __HAVE_ARCH_PTE_SPECIAL
                                                   >> 816 # define HAVE_PTE_SPECIAL 1
                                                   >> 817 #else
                                                   >> 818 # define HAVE_PTE_SPECIAL 0
                                                   >> 819 #endif
593 struct page *vm_normal_page(struct vm_area_str    820 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
594                             pte_t pte)         !! 821                                 pte_t pte)
595 {                                                 822 {
596         unsigned long pfn = pte_pfn(pte);         823         unsigned long pfn = pte_pfn(pte);
597                                                   824 
598         if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {  !! 825         if (HAVE_PTE_SPECIAL) {
599                 if (likely(!pte_special(pte)))    826                 if (likely(!pte_special(pte)))
600                         goto check_pfn;           827                         goto check_pfn;
601                 if (vma->vm_ops && vma->vm_ops    828                 if (vma->vm_ops && vma->vm_ops->find_special_page)
602                         return vma->vm_ops->fi    829                         return vma->vm_ops->find_special_page(vma, addr);
603                 if (vma->vm_flags & (VM_PFNMAP    830                 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
604                         return NULL;              831                         return NULL;
605                 if (is_zero_pfn(pfn))          !! 832                 if (!is_zero_pfn(pfn))
606                         return NULL;           !! 833                         print_bad_pte(vma, addr, pte, NULL);
607                 if (pte_devmap(pte))           << 
608                 /*                             << 
609                  * NOTE: New users of ZONE_DEVICE will not set pte_devmap()  <<
610                  * and will have refcounts incremented on their struct pages  <<
611                  * when they are inserted into PTEs, thus they are safe to  <<
612                  * return here. Legacy ZONE_DEVICE pages that set pte_devmap()  <<
613                  * do not have refcounts. Example of legacy ZONE_DEVICE is  <<
614                  * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers.  <<
615                  */                            << 
616                         return NULL;           << 
617                                                << 
618                 print_bad_pte(vma, addr, pte, NULL);  <<
619                 return NULL;                      834                 return NULL;
620         }                                         835         }
621                                                   836 
622         /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */  !! 837         /* !HAVE_PTE_SPECIAL case follows: */
623                                                   838 
624         if (unlikely(vma->vm_flags & (VM_PFNMA    839         if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
625                 if (vma->vm_flags & VM_MIXEDMA    840                 if (vma->vm_flags & VM_MIXEDMAP) {
626                         if (!pfn_valid(pfn))      841                         if (!pfn_valid(pfn))
627                                 return NULL;      842                                 return NULL;
628                         if (is_zero_pfn(pfn))  << 
629                                 return NULL;   << 
630                         goto out;                 843                         goto out;
631                 } else {                          844                 } else {
632                         unsigned long off;        845                         unsigned long off;
633                         off = (addr - vma->vm_    846                         off = (addr - vma->vm_start) >> PAGE_SHIFT;
634                         if (pfn == vma->vm_pgo    847                         if (pfn == vma->vm_pgoff + off)
635                                 return NULL;      848                                 return NULL;
636                         if (!is_cow_mapping(vm    849                         if (!is_cow_mapping(vma->vm_flags))
637                                 return NULL;      850                                 return NULL;
638                 }                                 851                 }
639         }                                         852         }
640                                                   853 
641         if (is_zero_pfn(pfn))                     854         if (is_zero_pfn(pfn))
642                 return NULL;                      855                 return NULL;
643                                                << 
644 check_pfn:                                        856 check_pfn:
645         if (unlikely(pfn > highest_memmap_pfn)    857         if (unlikely(pfn > highest_memmap_pfn)) {
646                 print_bad_pte(vma, addr, pte,     858                 print_bad_pte(vma, addr, pte, NULL);
647                 return NULL;                      859                 return NULL;
648         }                                         860         }
649                                                   861 
650         /*                                        862         /*
651          * NOTE! We still have PageReserved()     863          * NOTE! We still have PageReserved() pages in the page tables.
652          * eg. VDSO mappings can cause them to    864          * eg. VDSO mappings can cause them to exist.
653          */                                       865          */
654 out:                                              866 out:
655         VM_WARN_ON_ONCE(is_zero_pfn(pfn));     << 
656         return pfn_to_page(pfn);                  867         return pfn_to_page(pfn);
657 }                                                 868 }
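
The linearity rule quoted in the comment block before vm_normal_page() can be checked with plain arithmetic. A userspace sketch (model_vma and pte_is_special() are invented names; 4 KiB pages assumed):

/* illustration only, not kernel code */
#include <stdio.h>

#define MODEL_PAGE_SHIFT 12	/* assumed 4 KiB pages */

struct model_vma {
	unsigned long vm_start;
	unsigned long vm_pgoff;	/* first pfn mapped at vm_start */
};

static int pte_is_special(const struct model_vma *vma,
			  unsigned long addr, unsigned long pfn)
{
	unsigned long off = (addr - vma->vm_start) >> MODEL_PAGE_SHIFT;

	return pfn == vma->vm_pgoff + off;
}

int main(void)
{
	struct model_vma vma = { .vm_start = 0x40000000, .vm_pgoff = 0x80000 };

	/* second page still points at pfn 0x80001: the original, special mapping */
	printf("%d\n", pte_is_special(&vma, vma.vm_start + 0x1000, 0x80001));
	/* after COW the pte points elsewhere: a normal page */
	printf("%d\n", pte_is_special(&vma, vma.vm_start + 0x1000, 0x12345));
	return 0;
}
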
658                                                   869 
659 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,  !! 870 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
660                             pte_t pte)         << 
661 {                                              << 
662         struct page *page = vm_normal_page(vma, addr, pte);  <<
663                                                << 
664         if (page)                              << 
665                 return page_folio(page);       << 
666         return NULL;                           << 
667 }                                              << 
668                                                << 
669 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES          << 
670 struct page *vm_normal_page_pmd(struct vm_area    871 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
671                                 pmd_t pmd)        872                                 pmd_t pmd)
672 {                                                 873 {
673         unsigned long pfn = pmd_pfn(pmd);         874         unsigned long pfn = pmd_pfn(pmd);
674                                                   875 
675         /* Currently it's only used for huge pfnmaps */  !! 876         /*
676         if (unlikely(pmd_special(pmd)))        !! 877          * There is no pmd_special() but there may be special pmds, e.g.
677                 return NULL;                   !! 878          * in a direct-access (dax) mapping, so let's just replicate the
678                                                !! 879          * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
                                                   >> 880          */
679         if (unlikely(vma->vm_flags & (VM_PFNMA    881         if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
680                 if (vma->vm_flags & VM_MIXEDMA    882                 if (vma->vm_flags & VM_MIXEDMAP) {
681                         if (!pfn_valid(pfn))      883                         if (!pfn_valid(pfn))
682                                 return NULL;      884                                 return NULL;
683                         goto out;                 885                         goto out;
684                 } else {                          886                 } else {
685                         unsigned long off;        887                         unsigned long off;
686                         off = (addr - vma->vm_    888                         off = (addr - vma->vm_start) >> PAGE_SHIFT;
687                         if (pfn == vma->vm_pgo    889                         if (pfn == vma->vm_pgoff + off)
688                                 return NULL;      890                                 return NULL;
689                         if (!is_cow_mapping(vm    891                         if (!is_cow_mapping(vma->vm_flags))
690                                 return NULL;      892                                 return NULL;
691                 }                                 893                 }
692         }                                         894         }
693                                                   895 
694         if (pmd_devmap(pmd))                   !! 896         if (is_zero_pfn(pfn))
695                 return NULL;                   << 
696         if (is_huge_zero_pmd(pmd))             << 
697                 return NULL;                      897                 return NULL;
698         if (unlikely(pfn > highest_memmap_pfn)    898         if (unlikely(pfn > highest_memmap_pfn))
699                 return NULL;                      899                 return NULL;
700                                                   900 
701         /*                                        901         /*
702          * NOTE! We still have PageReserved()     902          * NOTE! We still have PageReserved() pages in the page tables.
703          * eg. VDSO mappings can cause them to    903          * eg. VDSO mappings can cause them to exist.
704          */                                       904          */
705 out:                                              905 out:
706         return pfn_to_page(pfn);                  906         return pfn_to_page(pfn);
707 }                                                 907 }
708                                                << 
709 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,  <<
710                                   unsigned long addr, pmd_t pmd)  <<
711 {  <<
712         struct page *page = vm_normal_page_pmd(vma, addr, pmd);  <<
713                                                << 
714         if (page)                              << 
715                 return page_folio(page);       << 
716         return NULL;                           << 
717 }                                              << 
718 #endif                                            908 #endif
719                                                   909 
720 static void restore_exclusive_pte(struct vm_area_struct *vma,  <<
721                                   struct page *page, unsigned long address,  <<
722                                   pte_t *ptep)  <<
723 {  <<
724         struct folio *folio = page_folio(page);  <<
725         pte_t orig_pte;  <<
726         pte_t pte;  <<
727         swp_entry_t entry;  <<
728   <<
729         orig_pte = ptep_get(ptep);  <<
730         pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));  <<
731         if (pte_swp_soft_dirty(orig_pte))  <<
732                 pte = pte_mksoft_dirty(pte);  <<
733   <<
734         entry = pte_to_swp_entry(orig_pte);  <<
735         if (pte_swp_uffd_wp(orig_pte))  <<
736                 pte = pte_mkuffd_wp(pte);  <<
737         else if (is_writable_device_exclusive_entry(entry))  <<
738                 pte = maybe_mkwrite(pte_mkdirty(pte), vma);  <<
739   <<
740         VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) &&  <<
741                                            PageAnonExclusive(page)), folio);  <<
742   <<
743         /*  <<
744          * No need to take a page reference as one was already  <<
745          * created when the swap entry was made.  <<
746          */  <<
747         if (folio_test_anon(folio))  <<
748                 folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);  <<
749         else  <<
750                 /*  <<
751                  * Currently device exclusive access only supports anonymous  <<
752                  * memory so the entry shouldn't point to a filebacked page.  <<
753                  */  <<
754                 WARN_ON_ONCE(1);  <<
755   <<
756         set_pte_at(vma->vm_mm, address, ptep, pte);  <<
757   <<
758         /*  <<
759          * No need to invalidate - it was non-present before. However  <<
760          * secondary CPUs may have mappings that need invalidating.  <<
761          */  <<
762         update_mmu_cache(vma, address, ptep);  <<
763 }  <<
764                                                << 
765 /*  <<
766  * Tries to restore an exclusive pte if the page lock can be acquired without  <<
767  * sleeping.  <<
768  */  <<
769 static int  <<
770 try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,  <<
771                         unsigned long addr)  <<
772 {  <<
773         swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte));  <<
774         struct page *page = pfn_swap_entry_to_page(entry);  <<
775   <<
776         if (trylock_page(page)) {  <<
777                 restore_exclusive_pte(vma, page, addr, src_pte);  <<
778                 unlock_page(page);  <<
779                 return 0;  <<
780         }  <<
781   <<
782         return -EBUSY;  <<
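
try_restore_exclusive_pte() must not sleep (the caller holds the page-table lock), so it uses trylock_page() and reports -EBUSY to make the caller back off and retry unlocked. The same shape with a pthread mutex standing in for the page lock (illustrative only):

/* illustration only, not kernel code; link with -lpthread */
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

static int try_restore(int *restored)
{
	if (pthread_mutex_trylock(&page_lock) != 0)
		return -EBUSY;	/* caller drops its locks and retries */

	*restored = 1;		/* the actual restore work goes here */
	pthread_mutex_unlock(&page_lock);
	return 0;
}

int main(void)
{
	int restored = 0;

	return try_restore(&restored) == 0 && restored ? 0 : 1;
}
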
784                                                << 
785 /*                                                910 /*
786  * copy one vm_area from one task to the other    911  * copy one vm_area from one task to the other. Assumes the page tables
787  * already present in the new task to be clear    912  * already present in the new task to be cleared in the whole range
788  * covered by this vma.                           913  * covered by this vma.
789  */                                               914  */
790                                                   915 
791 static unsigned long                           !! 916 static inline unsigned long
792 copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,  !! 917 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
793                 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,  !! 918                 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
794                 struct vm_area_struct *src_vma, unsigned long addr, int *rss)  !! 919                 unsigned long addr, int *rss)
795 {                                              !! 920 {
796         unsigned long vm_flags = dst_vma->vm_flags;  !! 921         unsigned long vm_flags = vma->vm_flags;
797         pte_t orig_pte = ptep_get(src_pte);    !! 922         pte_t pte = *src_pte;
798         pte_t pte = orig_pte;                  << 
799         struct folio *folio;                   << 
800         struct page *page;                        923         struct page *page;
801         swp_entry_t entry = pte_to_swp_entry(orig_pte);  <<
802                                                << 
803         if (likely(!non_swap_entry(entry))) {  << 
804                 if (swap_duplicate(entry) < 0) << 
805                         return -EIO;           << 
806                                                << 
807                 /* make sure dst_mm is on swapoff's mmlist. */  !! 929                 if (likely(!non_swap_entry(entry))) {
808                 if (unlikely(list_empty(&dst_mm->mmlist))) {  !! 930                         if (swap_duplicate(entry) < 0)
809                         spin_lock(&mmlist_lock);  !! 931                                 return entry.val;
810                         if (list_empty(&dst_mm->mmlist))  !! 932 
811                                 list_add(&dst_mm->mmlist,  !! 933                         /* make sure dst_mm is on swapoff's mmlist. */
812                                                 &src_mm->mmlist);  !! 934                         if (unlikely(list_empty(&dst_mm->mmlist))) {
813                         spin_unlock(&mmlist_lock);  !! 935                                 spin_lock(&mmlist_lock);
814                 }  !! 936                                 if (list_empty(&dst_mm->mmlist))
815                 /* Mark the swap entry as shared. */  !! 937                                         list_add(&dst_mm->mmlist,
816                 if (pte_swp_exclusive(orig_pte)) {  !! 938                                                         &src_mm->mmlist);
817                         pte = pte_swp_clear_exclusive(orig_pte);  !! 939                                 spin_unlock(&mmlist_lock);
818                         set_pte_at(src_mm, addr, src_pte, pte);  !! 940                         }
819                 }                              << 
820                 rss[MM_SWAPENTS]++;            << 
821         } else if (is_migration_entry(entry))  << 
822                 folio = pfn_swap_entry_folio(e << 
823                                                   924 
824                 rss[mm_counter(folio)]++;      !! 925         /* pte contains position in swap or file, so copy. */
825                                                !! 926         if (unlikely(!pte_present(pte))) {
826                 if (!is_readable_migration_entry(entry) &&  !! 947                         if (is_write_migration_entry(entry) &&
827                                 is_cow_mapping(vm_flags)) {  !! 948                                         is_cow_mapping(vm_flags)) {
828                         /*  !! 949                                 /*
829                          * COW mappings require pages in both parent and child  !! 950                                  * COW mappings require pages in both
830                          * to be set to read. A previously exclusive entry is  !! 951                                  * parent and child to be set to read.
831                          * now shared.  !! 952                                  */
832                          */  !! 953                                 make_migration_entry_read(&entry);
833                         entry = make_readable_migration_entry(  !! 954                                 pte = swp_entry_to_pte(entry);
834                                                         swp_offset(entry));  !! 955                                 if (pte_swp_soft_dirty(*src_pte))
835                         pte = swp_entry_to_pte(entry);  !! 956                                         pte = pte_swp_mksoft_dirty(pte);
836                         if (pte_swp_soft_dirty(orig_pte))  !! 957                                 set_pte_at(src_mm, addr, src_pte, pte);
837                                 pte = pte_swp_mksoft_dirty(pte);  !! 958                         }
838                         if (pte_swp_uffd_wp(orig_pte))  !! 959                 }
839                                 pte = pte_swp_mkuffd_wp(pte);  !! 960                 goto out_set_pte;
840                         set_pte_at(src_mm, addr, src_pte, pte);  !! 961         }
841                 }                              !! 942                 } else if (is_migration_entry(entry)) {
842         } else if (is_device_private_entry(entry)) {  <<
843                 page = pfn_swap_entry_to_page(entry);  <<
844                 folio = page_folio(page);  <<
845   <<
846                 /*  <<
847                  * Update rss count even for unaddressable pages, as  <<
848                  * they should treated just like normal pages in this  <<
849                  * respect.  <<
850                  *  <<
851                  * We will likely want to have some new rss counters  <<
852                  * for unaddressable pages, at some point. But for now  <<
853                  * keep things as they are.  <<
854                  */  <<
855                 folio_get(folio);  <<
856                 rss[mm_counter(folio)]++;  <<
857                 /* Cannot fail as these pages cannot get pinned. */  <<
858                 folio_try_dup_anon_rmap_pte(folio, page, src_vma);  <<
859   <<
860                 /*  <<
861                  * We do not preserve soft-dirty information, because so  <<
862                  * far, checkpoint/restore is the only feature that  <<
863                  * requires that. And checkpoint/restore does not work  <<
864                  * when a device driver is involved (you cannot easily  <<
865                  * save and restore device driver state).  <<
866                  */  <<
867                 if (is_writable_device_private_entry(entry) &&  <<
868                     is_cow_mapping(vm_flags)) {  <<
869                         entry = make_readable_device_private_entry(  <<
870                                                         swp_offset(entry));  <<
871                         pte = swp_entry_to_pte(entry);  <<
872                         if (pte_swp_uffd_wp(orig_pte))  <<
873                                 pte = pte_swp_mkuffd_wp(pte);  <<
874                         set_pte_at(src_mm, addr, src_pte, pte);  <<
875                 }  <<
876         } else if (is_device_exclusive_entry(entry)) {  <<
877                 /*  <<
878                  * Make device exclusive entries present by restoring the  <<
879                  * original entry then copying as for a present pte. Device  <<
880                  * exclusive entries currently only support private writable  <<
881                  * (ie. COW) mappings.  <<
882                  */  <<
883                 VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));  <<
884                 if (try_restore_exclusive_pte(src_pte, src_vma, addr))  <<
885                         return -EBUSY;  <<
886                 return -ENOENT;  <<
887         } else if (is_pte_marker_entry(entry)) {  <<
888                 pte_marker marker = copy_pte_marker(entry, dst_vma);  <<
889   <<
890                 if (marker)  <<
891                         set_pte_at(dst_mm, addr, dst_pte,  <<
892                                    make_pte_marker(marker));  <<
893                 return 0;  <<
894         }  <<
895         if (!userfaultfd_wp(dst_vma))  <<
896                 pte = pte_swp_clear_uffd_wp(pte);  <<
897         set_pte_at(dst_mm, addr, dst_pte, pte);  <<
898         return 0;  <<
899 }  <<
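
copy_nonpresent_pte() dispatches on the type encoded in the swap entry. A toy encoding makes that concrete (the MODEL_* layout is invented; the real swp_entry_t layout is architecture-specific):

/* illustration only, not kernel code */
#include <stdio.h>

#define MODEL_TYPE_BITS    5
#define MODEL_TYPE_SWAP    0	/* invented toy type numbering */
#define MODEL_TYPE_MIGRATE 30
#define MODEL_TYPE_MARKER  31

typedef struct { unsigned long val; } model_swp_entry_t;

static model_swp_entry_t mk_entry(unsigned type, unsigned long offset)
{
	model_swp_entry_t e = { (offset << MODEL_TYPE_BITS) | type };
	return e;
}

static unsigned entry_type(model_swp_entry_t e)
{
	return e.val & ((1u << MODEL_TYPE_BITS) - 1);
}

int main(void)
{
	model_swp_entry_t e = mk_entry(MODEL_TYPE_MIGRATE, 0x1234);

	switch (entry_type(e)) {
	case MODEL_TYPE_SWAP:
		printf("swap slot %#lx\n", e.val >> MODEL_TYPE_BITS);
		break;
	case MODEL_TYPE_MIGRATE:
		printf("migration target %#lx\n", e.val >> MODEL_TYPE_BITS);
		break;
	case MODEL_TYPE_MARKER:
		printf("pte marker\n");
		break;
	}
	return 0;
}
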
900                                                   962 
901 /*                                             !! 963         /*
902  * Copy a present and normal page.             !! 964          * If it's a COW mapping, write protect it both
903  *                                             !! 965          * in the parent and the child
904  * NOTE! The usual case is that this isn't required;  !! 966          */
905  * instead, the caller can just increase the page refcount  !! 967         if (is_cow_mapping(vm_flags)) {
906  * and re-use the pte the traditional way.     !! 968                 ptep_set_wrprotect(src_mm, addr, src_pte);
907  *                                             << 
908  * And if we need a pre-allocated page but don't yet have  <<
909  * one, return a negative error to let the preallocation  <<
910  * code know so that it can do so outside the page table  <<
911  * lock.                                       << 
912  */                                            << 
913 static inline int                              << 
914 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,  <<
915                   pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,  <<
916                   struct folio **prealloc, struct page *page)  <<
917 {                                              << 
918         struct folio *new_folio;               << 
919         pte_t pte;                             << 
920                                                << 
921         new_folio = *prealloc;                 << 
922         if (!new_folio)                        << 
923                 return -EAGAIN;                << 
924                                                << 
925         /*                                     << 
926          * We have a prealloc page, all good!  Take it  <<
927          * over and copy the page & arm it.    << 
928          */                                    << 
929                                                << 
930         if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma))  <<
931                 return -EHWPOISON;             << 
932                                                << 
933         *prealloc = NULL;                      << 
934         __folio_mark_uptodate(new_folio);      << 
935         folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);  <<
936         folio_add_lru_vma(new_folio, dst_vma); << 
937         rss[MM_ANONPAGES]++;                   << 
938                                                << 
939         /* All done, just insert the new page copy in the child */  <<
940         pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);  <<
941         pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);  <<
942         if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))  <<
943                 /* Uffd-wp needs to be delivered to dest pte as well */  <<
944                 pte = pte_mkuffd_wp(pte);  <<
945         set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);  <<
946         return 0;                              << 
947 }                                              << 
948                                                << 
949 static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,  <<
950                 struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,  <<
951                 pte_t pte, unsigned long addr, int nr)  <<
952 {  <<
953         struct mm_struct *src_mm = src_vma->vm_mm;  <<
954   <<
955         /* If it's a COW mapping, write protect it both processes. */  <<
956         if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {  <<
957                 wrprotect_ptes(src_mm, addr, src_pte, nr);  <<
958                 pte = pte_wrprotect(pte);         969                 pte = pte_wrprotect(pte);
959         }                                         970         }
960                                                   971 
961         /* If it's a shared mapping, mark it clean in the child. */  !! 972         /*
962         if (src_vma->vm_flags & VM_SHARED)     !! 973          * If it's a shared mapping, mark it clean in
                                                   >> 974          * the child
                                                   >> 975          */
                                                   >> 976         if (vm_flags & VM_SHARED)
963                 pte = pte_mkclean(pte);           977                 pte = pte_mkclean(pte);
964         pte = pte_mkold(pte);                     978         pte = pte_mkold(pte);
965                                                   979 
966         if (!userfaultfd_wp(dst_vma))          !! 980         page = vm_normal_page(vma, addr, pte);
967                 pte = pte_clear_uffd_wp(pte);  !! 981         if (page) {
968                                                !! 982                 get_page(page);
969         set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);  !! 983                 page_dup_rmap(page, false);
970 }                                              !! 984                 rss[mm_counter(page)]++;
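
__copy_present_ptes() enforces the classic COW invariant: a private writable pte must end up write-protected in both parent and child, so whichever side writes first takes the fault and gets its own copy. A toy bit-flag model (all names hypothetical):

/* illustration only, not kernel code */
#include <assert.h>

#define MODEL_PTE_WRITE  0x1
#define MODEL_PTE_DIRTY  0x2

static void copy_cow_pte(unsigned *parent_pte, unsigned *child_pte, int is_cow)
{
	unsigned pte = *parent_pte;

	if (is_cow && (pte & MODEL_PTE_WRITE)) {
		*parent_pte &= ~MODEL_PTE_WRITE;	/* wrprotect the source */
		pte &= ~MODEL_PTE_WRITE;		/* and the copy */
	}
	*child_pte = pte;
}

int main(void)
{
	unsigned parent = MODEL_PTE_WRITE | MODEL_PTE_DIRTY, child = 0;

	copy_cow_pte(&parent, &child, 1);
	assert(!(parent & MODEL_PTE_WRITE) && !(child & MODEL_PTE_WRITE));
	return 0;
}
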
971                                                << 
972 /*                                             << 
973  * Copy one present PTE, trying to batch-process subsequent PTEs that map  <<
974  * consecutive pages of the same folio by copying them as well.  <<
975  *  <<
976  * Returns -EAGAIN if one preallocated page is required to copy the next PTE.  <<
977  * Otherwise, returns the number of copied PTEs (at least 1).  <<
978  */                                            << 
979 static inline int                              << 
980 copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,  <<
981                  pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,  <<
982                  int max_nr, int *rss, struct folio **prealloc)  <<
983 {                                              << 
984         struct page *page;                     << 
985         struct folio *folio;                   << 
986         bool any_writable;                     << 
987         fpb_t flags = 0;                       << 
988         int err, nr;                           << 
989                                                << 
990         page = vm_normal_page(src_vma, addr, pte);  <<
991         if (unlikely(!page))                   << 
992                 goto copy_pte;                 << 
993                                                << 
994         folio = page_folio(page);              << 
995                                                << 
996         /*                                     << 
997          * If we likely have to copy, just don't bother with batching. Make  <<
998          * sure that the common "small folio" case is as fast as possible  <<
999          * by keeping the batching logic separate.  <<
1000          */                                   << 
1001         if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {  <<
1002                 if (src_vma->vm_flags & VM_SHARED)  <<
1003                         flags |= FPB_IGNORE_DIRTY;  <<
1004                 if (!vma_soft_dirty_enabled(src_vma))  <<
1005                         flags |= FPB_IGNORE_SOFT_DIRTY;  <<
1006                                               << 
1007                 nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,  <<
1008                                      &any_writable, NULL, NULL);  <<
1009                 folio_ref_add(folio, nr);     << 
1010                 if (folio_test_anon(folio)) { << 
1011                         if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,  <<
1012                                                                   nr, src_vma))) {  <<
1013                                 folio_ref_sub(folio, nr);  <<
1014                                 return -EAGAIN;  <<
1015                         }  <<
1016                         rss[MM_ANONPAGES] += nr;  <<
1017                         VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);  <<
1018                 } else {                      << 
1019                         folio_dup_file_rmap_ptes(folio, page, nr);  <<
1020                         rss[mm_counter_file(folio)] += nr;  <<
1021                 }                             << 
1022                 if (any_writable)             << 
1023                         pte = pte_mkwrite(pte, src_vma);  <<
1024                 __copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,  <<
1025                                     addr, nr);  <<
1026                 return nr;                    << 
1027         }                                        985         }
1028                                                  986 
1029         folio_get(folio);                     !! 987 out_set_pte:
1030         if (folio_test_anon(folio)) {         !! 988         set_pte_at(dst_mm, addr, dst_pte, pte);
1031                 /*                            !! 989         return 0;
1032                  * If this page may have been << 
1033                  * copy the page immediately  << 
1034                  * guarantee the pinned page  << 
1035                  * future.                    << 
1036                  */                           << 
1037                 if (unlikely(folio_try_dup_an << 
1038                         /* Page may be pinned << 
1039                         folio_put(folio);     << 
1040                         err = copy_present_pa << 
1041                                               << 
1042                         return err ? err : 1; << 
1043                 }                             << 
1044                 rss[MM_ANONPAGES]++;          << 
1045                 VM_WARN_ON_FOLIO(PageAnonExcl << 
1046         } else {                              << 
1047                 folio_dup_file_rmap_pte(folio << 
1048                 rss[mm_counter_file(folio)]++ << 
1049         }                                     << 
1050                                               << 
1051 copy_pte:                                     << 
1052         __copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, 1);  !! 988         set_pte_at(dst_mm, addr, dst_pte, pte);
1053         return 1;                             << 
1054 }                                             << 
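
copy_present_ptes() batches runs of ptes that map consecutive pages of one folio, so one bookkeeping operation covers the whole run. The core counting idea, reduced to an array of pfns (a sketch, not the real folio_pte_batch()):

/* illustration only, not kernel code */
#include <stdio.h>

static int pte_batch(const unsigned long *pfns, int max_nr)
{
	int nr = 1;

	while (nr < max_nr && pfns[nr] == pfns[nr - 1] + 1)
		nr++;
	return nr;
}

int main(void)
{
	/* a 4-page run of one folio, then an unrelated page */
	unsigned long pfns[] = { 100, 101, 102, 103, 999 };

	printf("batch of %d ptes\n", pte_batch(pfns, 5));	/* prints 4 */
	return 0;
}
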
1055                                               << 
1056 static inline struct folio *folio_prealloc(struct mm_struct *src_mm,  <<
1057                 struct vm_area_struct *vma, unsigned long addr, bool need_zero)  <<
1058 {                                             << 
1059         struct folio *new_folio;              << 
1060                                               << 
1061         if (need_zero)                        << 
1062                 new_folio = vma_alloc_zeroed_movable_folio(vma, addr);  <<
1063         else                                  << 
1064                 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,  <<
1065                                             addr, false);  <<
1066                                               << 
1067         if (!new_folio)                       << 
1068                 return NULL;                  << 
1069                                               << 
1070         if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {  <<
1071                 folio_put(new_folio);         << 
1072                 return NULL;                  << 
1073         }                                     << 
1074         folio_throttle_swaprate(new_folio, GF << 
1075                                               << 
1076         return new_folio;                     << 
1077 }                                                990 }
1078                                                  991 
1079 static int                                    !! 992 static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1080 copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,        !! 993                    pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
1081                pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,                    !! 994                    unsigned long addr, unsigned long end)
1082                unsigned long end)             << 
1083 {                                                995 {
1084         struct mm_struct *dst_mm = dst_vma->vm_mm;                                    <<
1085         struct mm_struct *src_mm = src_vma->vm_mm;                                    <<
1086         pte_t *orig_src_pte, *orig_dst_pte;      996         pte_t *orig_src_pte, *orig_dst_pte;
1087         pte_t *src_pte, *dst_pte;                997         pte_t *src_pte, *dst_pte;
1088         pte_t ptent;                          << 
1089         spinlock_t *src_ptl, *dst_ptl;           998         spinlock_t *src_ptl, *dst_ptl;
1090         int progress, max_nr, ret = 0;        !! 999         int progress = 0;
1091         int rss[NR_MM_COUNTERS];                 1000         int rss[NR_MM_COUNTERS];
1092         swp_entry_t entry = (swp_entry_t){0};    1001         swp_entry_t entry = (swp_entry_t){0};
1093         struct folio *prealloc = NULL;        << 
1094         int nr;                               << 
1095                                                  1002 
1096 again:                                           1003 again:
1097         progress = 0;                         << 
1098         init_rss_vec(rss);                       1004         init_rss_vec(rss);
1099                                                  1005 
1100         /*                                                                            <<
1101          * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the           <<
1102          * error handling here, assume that exclusive mmap_lock on dst and src        <<
1103          * protects anon from unexpected THP transitions; with shmem and file         <<
1104          * protected by mmap_lock-less collapse skipping areas with anon_vma          <<
1105          * (whereas vma_needs_copy() skips areas without anon_vma).  A rework         <<
1106          * can remove such assumptions later, if it proves worthwhile.                <<
1107          */                                                                           <<
1108         dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);                   1006         dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
1109         if (!dst_pte) {                       !! 1007         if (!dst_pte)
1110                 ret = -ENOMEM;                !! 1008                 return -ENOMEM;
1111                 goto out;                     !! 1009         src_pte = pte_offset_map(src_pmd, addr);
1112         }                                     !! 1010         src_ptl = pte_lockptr(src_mm, src_pmd);
1113         src_pte = pte_offset_map_nolock(src_mm, src_pmd, addr, &src_ptl);             <<
1114         if (!src_pte) {                                                               <<
1115                 pte_unmap_unlock(dst_pte, dst_ptl);                                   <<
1116                 /* ret == 0 */                << 
1117                 goto out;                     << 
1118         }                                     << 
1119         spin_lock_nested(src_ptl, SINGLE_DEPT    1011         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1120         orig_src_pte = src_pte;                  1012         orig_src_pte = src_pte;
1121         orig_dst_pte = dst_pte;                  1013         orig_dst_pte = dst_pte;
1122         arch_enter_lazy_mmu_mode();              1014         arch_enter_lazy_mmu_mode();
1123                                                  1015 
1124         do {                                     1016         do {
1125                 nr = 1;                       << 
1126                                               << 
1127                 /*                               1017                 /*
1128                  * We are holding two locks at this point - either of them               1018                  * We are holding two locks at this point - either of them
1129                  * could generate latencies in another task on another CPU.              1019                  * could generate latencies in another task on another CPU.
1130                  */                              1020                  */
1131                 if (progress >= 32) {            1021                 if (progress >= 32) {
1132                         progress = 0;            1022                         progress = 0;
1133                         if (need_resched() ||    1023                         if (need_resched() ||
1134                             spin_needbreak(sr    1024                             spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
1135                                 break;           1025                                 break;
1136                 }                                1026                 }
1137                 ptent = ptep_get(src_pte);    !! 1027                 if (pte_none(*src_pte)) {
1138                 if (pte_none(ptent)) {        << 
1139                         progress++;              1028                         progress++;
1140                         continue;                1029                         continue;
1141                 }                                1030                 }
1142                 if (unlikely(!pte_present(ptent))) {                                  !! 1031                 entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
1143                         ret = copy_nonpresent_pte(dst_mm, src_mm,                     !! 1032                                                         vma, addr, rss);
1144                                                   dst_pte, src_pte,                   !! 1033                 if (entry.val)
1145                                                   dst_vma, src_vma,                   <<
1146                                                   addr, rss);                         <<
1147                         if (ret == -EIO) {                                            <<
1148                                 entry = pte_to_swp_entry(ptep_get(src_pte));          <<
1149                                 break;                                                <<
1150                         } else if (ret == -EBUSY) {                                   <<
1151                                 break;                                                <<
1152                         } else if (!ret) {                                            <<
1153                                 progress += 8;                                        <<
1154                                 continue;                                             <<
1155                         }                                                             <<
1156                         ptent = ptep_get(src_pte);                                    <<
1157                         VM_WARN_ON_ONCE(!pte_present(ptent));                         <<
1158                                                                                       <<
1159                         /*                                                            <<
1160                          * Device exclusive entry restored, continue by copying       <<
1161                          * the now present pte.                                       <<
1162                          */                                                           <<
1163                         WARN_ON_ONCE(ret != -ENOENT);                                 <<
1164                 }                                                                     <<
1165                 /* copy_present_ptes() will clear `*prealloc' if consumed */          <<
1166                 max_nr = (end - addr) / PAGE_SIZE;                                    <<
1167                 ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte,           <<
1168                                         ptent, addr, max_nr, rss, &prealloc);         <<
1169                 /*                                                                    <<
1170                  * If we need a pre-allocated page for this pte, drop the             <<
1171                  * locks, allocate, and try again.                                    <<
1172                  * If copy failed due to hwpoison in source page, break out.          <<
1173                  */                                                                   <<
1174                 if (unlikely(ret == -EAGAIN || ret == -EHWPOISON))                    <<
1175                         break;                   1034                         break;
1176                 if (unlikely(prealloc)) {     !! 1035                 progress += 8;
1177                         /*                    !! 1036         } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
1178                          * pre-alloc page cannot be reused by next time so as         <<
1179                          * to strictly follow mempolicy (e.g., alloc_page_vma()       <<
1180                          * will allocate page according to address).  This            <<
1181                          * could only happen if one pinned pte changed.               <<
1182                          */                   << 
1183                         folio_put(prealloc);  << 
1184                         prealloc = NULL;      << 
1185                 }                             << 
1186                 nr = ret;                     << 
1187                 progress += 8 * nr;           << 
1188         } while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,                <<
1189                  addr != end);                << 
1190                                                  1037 
1191         arch_leave_lazy_mmu_mode();              1038         arch_leave_lazy_mmu_mode();
1192         pte_unmap_unlock(orig_src_pte, src_ptl);                                      !! 1039         spin_unlock(src_ptl);
                                                   >> 1040         pte_unmap(orig_src_pte);
1193         add_mm_rss_vec(dst_mm, rss);             1041         add_mm_rss_vec(dst_mm, rss);
1194         pte_unmap_unlock(orig_dst_pte, dst_pt    1042         pte_unmap_unlock(orig_dst_pte, dst_ptl);
1195         cond_resched();                          1043         cond_resched();
1196                                                  1044 
1197         if (ret == -EIO) {                    !! 1045         if (entry.val) {
1198                 VM_WARN_ON_ONCE(!entry.val);  !! 1046                 if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
1199                 if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {             <<
1200                         ret = -ENOMEM;        << 
1201                         goto out;             << 
1202                 }                             << 
1203                 entry.val = 0;                << 
1204         } else if (ret == -EBUSY || unlikely(ret == -EHWPOISON)) {                    <<
1205                 goto out;                     << 
1206         } else if (ret ==  -EAGAIN) {         << 
1207                 prealloc = folio_prealloc(src_mm, src_vma, addr, false);              <<
1208                 if (!prealloc)                << 
1209                         return -ENOMEM;          1047                         return -ENOMEM;
1210         } else if (ret < 0) {                 !! 1048                 progress = 0;
1211                 VM_WARN_ON_ONCE(1);           << 
1212         }                                        1049         }
1213                                               << 
1214         /* We've captured and resolved the error. Reset, try again. */                <<
1215         ret = 0;                              << 
1216                                               << 
1217         if (addr != end)                         1050         if (addr != end)
1218                 goto again;                      1051                 goto again;
1219 out:                                          !! 1052         return 0;
1220         if (unlikely(prealloc))               << 
1221                 folio_put(prealloc);          << 
1222         return ret;                           << 
1223 }                                                1053 }
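
The error plumbing above implements a classic "allocate outside the lock, then retry" loop: copy_present_ptes() cannot sleep under the PTE spinlocks, so on -EAGAIN the function unlocks, allocates prealloc with GFP_KERNEL, and jumps back to again. Userspace mutexes do not forbid sleeping, so the constraint is artificial there, but the shape of the idiom can still be sketched (everything below -- the list, the names, and the need_node flag -- is invented for illustration):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list;
static int need_node = 1;       /* discovered only while holding the lock */

static int insert_one(void)
{
        struct node *prealloc = NULL;

again:
        pthread_mutex_lock(&lock);
        if (need_node && !prealloc) {
                /* The -EAGAIN path: drop the lock, allocate, retry. */
                pthread_mutex_unlock(&lock);
                prealloc = malloc(sizeof(*prealloc));
                if (!prealloc)
                        return -1;      /* the -ENOMEM analogue */
                goto again;
        }
        if (need_node) {
                prealloc->next = list;  /* consume the preallocation */
                list = prealloc;
                prealloc = NULL;
                need_node = 0;
        }
        pthread_mutex_unlock(&lock);
        /*
         * Lost a race: discard the spare, just as the "pre-alloc page
         * cannot be reused" comment above does with folio_put().
         */
        free(prealloc);
        return 0;
}

int main(void)
{
        insert_one();
        printf("list is %s\n", list ? "populated" : "empty");
        return 0;
}
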
1224                                                  1054 
1225 static inline int                             !! 1055 static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1226 copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,        !! 1056                 pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
1227                pud_t *dst_pud, pud_t *src_pud, unsigned long addr,                    !! 1057                 unsigned long addr, unsigned long end)
1228                unsigned long end)             << 
1229 {                                                1058 {
1230         struct mm_struct *dst_mm = dst_vma->vm_mm;                                    <<
1231         struct mm_struct *src_mm = src_vma->vm_mm;                                    <<
1232         pmd_t *src_pmd, *dst_pmd;                1059         pmd_t *src_pmd, *dst_pmd;
1233         unsigned long next;                      1060         unsigned long next;
1234                                                  1061 
1235         dst_pmd = pmd_alloc(dst_mm, dst_pud,     1062         dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1236         if (!dst_pmd)                            1063         if (!dst_pmd)
1237                 return -ENOMEM;                  1064                 return -ENOMEM;
1238         src_pmd = pmd_offset(src_pud, addr);     1065         src_pmd = pmd_offset(src_pud, addr);
1239         do {                                     1066         do {
1240                 next = pmd_addr_end(addr, end    1067                 next = pmd_addr_end(addr, end);
1241                 if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)                 !! 1068                 if (pmd_trans_huge(*src_pmd) || pmd_devmap(*src_pmd)) {
1242                         || pmd_devmap(*src_pmd)) {                                    <<
1243                         int err;                 1069                         int err;
1244                         VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);          !! 1070                         VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma);
1245                         err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,         !! 1071                         err = copy_huge_pmd(dst_mm, src_mm,
1246                                             addr, dst_vma, src_vma);                  !! 1072                                             dst_pmd, src_pmd, addr, vma);
1247                         if (err == -ENOMEM)      1073                         if (err == -ENOMEM)
1248                                 return -ENOME    1074                                 return -ENOMEM;
1249                         if (!err)                1075                         if (!err)
1250                                 continue;        1076                                 continue;
1251                         /* fall through */       1077                         /* fall through */
1252                 }                                1078                 }
1253                 if (pmd_none_or_clear_bad(src    1079                 if (pmd_none_or_clear_bad(src_pmd))
1254                         continue;                1080                         continue;
1255                 if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,                !! 1081                 if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
1256                                    addr, next))                                       !! 1082                                                 vma, addr, next))
1257                         return -ENOMEM;          1083                         return -ENOMEM;
1258         } while (dst_pmd++, src_pmd++, addr =    1084         } while (dst_pmd++, src_pmd++, addr = next, addr != end);
1259         return 0;                                1085         return 0;
1260 }                                                1086 }
1261                                                  1087 
1262 static inline int                             !! 1088 static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1263 copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,        !! 1089                 p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
1264                p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,                    !! 1090                 unsigned long addr, unsigned long end)
1265                unsigned long end)             << 
1266 {                                                1091 {
1267         struct mm_struct *dst_mm = dst_vma->vm_mm;                                    <<
1268         struct mm_struct *src_mm = src_vma->vm_mm;                                    <<
1269         pud_t *src_pud, *dst_pud;                1092         pud_t *src_pud, *dst_pud;
1270         unsigned long next;                      1093         unsigned long next;
1271                                                  1094 
1272         dst_pud = pud_alloc(dst_mm, dst_p4d,     1095         dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1273         if (!dst_pud)                            1096         if (!dst_pud)
1274                 return -ENOMEM;                  1097                 return -ENOMEM;
1275         src_pud = pud_offset(src_p4d, addr);     1098         src_pud = pud_offset(src_p4d, addr);
1276         do {                                     1099         do {
1277                 next = pud_addr_end(addr, end    1100                 next = pud_addr_end(addr, end);
1278                 if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {                  1101                 if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1279                         int err;                 1102                         int err;
1280                                                  1103 
1281                         VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);          !! 1104                         VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma);
1282                         err = copy_huge_pud(dst_mm, src_mm,                              1105                         err = copy_huge_pud(dst_mm, src_mm,
1283                                             dst_pud, src_pud, addr, src_vma);         !! 1106                                             dst_pud, src_pud, addr, vma);
1284                         if (err == -ENOMEM)      1107                         if (err == -ENOMEM)
1285                                 return -ENOME    1108                                 return -ENOMEM;
1286                         if (!err)                1109                         if (!err)
1287                                 continue;        1110                                 continue;
1288                         /* fall through */       1111                         /* fall through */
1289                 }                                1112                 }
1290                 if (pud_none_or_clear_bad(src    1113                 if (pud_none_or_clear_bad(src_pud))
1291                         continue;                1114                         continue;
1292                 if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,                !! 1115                 if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
1293                                    addr, next))                                       !! 1116                                                 vma, addr, next))
1294                         return -ENOMEM;          1117                         return -ENOMEM;
1295         } while (dst_pud++, src_pud++, addr =    1118         } while (dst_pud++, src_pud++, addr = next, addr != end);
1296         return 0;                                1119         return 0;
1297 }                                                1120 }
1298                                                  1121 
1299 static inline int                             !! 1122 static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1300 copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,        !! 1123                 pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
1301                pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,                    !! 1124                 unsigned long addr, unsigned long end)
1302                unsigned long end)             << 
1303 {                                                1125 {
1304         struct mm_struct *dst_mm = dst_vma->vm_mm;                                    <<
1305         p4d_t *src_p4d, *dst_p4d;                1126         p4d_t *src_p4d, *dst_p4d;
1306         unsigned long next;                      1127         unsigned long next;
1307                                                  1128 
1308         dst_p4d = p4d_alloc(dst_mm, dst_pgd,     1129         dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1309         if (!dst_p4d)                            1130         if (!dst_p4d)
1310                 return -ENOMEM;                  1131                 return -ENOMEM;
1311         src_p4d = p4d_offset(src_pgd, addr);     1132         src_p4d = p4d_offset(src_pgd, addr);
1312         do {                                     1133         do {
1313                 next = p4d_addr_end(addr, end    1134                 next = p4d_addr_end(addr, end);
1314                 if (p4d_none_or_clear_bad(src    1135                 if (p4d_none_or_clear_bad(src_p4d))
1315                         continue;                1136                         continue;
1316                 if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,                !! 1137                 if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
1317                                    addr, next))                                       !! 1138                                                 vma, addr, next))
1318                         return -ENOMEM;          1139                         return -ENOMEM;
1319         } while (dst_p4d++, src_p4d++, addr =    1140         } while (dst_p4d++, src_p4d++, addr = next, addr != end);
1320         return 0;                                1141         return 0;
1321 }                                                1142 }
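
copy_p4d_range() completes a pattern repeated at every level of the walk: pgd -> p4d -> pud -> pmd -> pte, each level allocating the destination table, skipping empty source slots, and recursing one level down. A self-contained toy with a two-level table shows that skeleton (the structure, FANOUT and all names are invented; real page tables index by address bits and clamp each step with the *_addr_end() helpers):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FANOUT 16                       /* toy: 16 slots per level */

struct toy_level {                      /* one "page table" */
        struct toy_level *child[FANOUT];        /* upper level: children */
        int leaf[FANOUT];                       /* lowest level: values */
};

/* Lowest level: like copy_pte_range() with no locking or swap cases. */
static int copy_leaf_level(struct toy_level *dst, const struct toy_level *src)
{
        memcpy(dst->leaf, src->leaf, sizeof(dst->leaf));
        return 0;
}

/* Upper level: allocate the dst table, skip "none" slots, recurse. */
static int copy_top_level(struct toy_level *dst, const struct toy_level *src)
{
        for (int i = 0; i < FANOUT; i++) {
                if (!src->child[i])     /* the *_none_or_clear_bad() test */
                        continue;
                dst->child[i] = calloc(1, sizeof(*dst->child[i]));
                if (!dst->child[i])
                        return -1;      /* the -ENOMEM case */
                if (copy_leaf_level(dst->child[i], src->child[i]))
                        return -1;
        }
        return 0;
}

int main(void)
{
        struct toy_level src = {0}, dst = {0};

        src.child[3] = calloc(1, sizeof(*src.child[3]));
        if (!src.child[3])
                return 1;
        src.child[3]->leaf[7] = 42;
        if (copy_top_level(&dst, &src))
                return 1;
        printf("copied: %d\n", dst.child[3]->leaf[7]);
        return 0;
}
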
1322                                                  1143 
1323 /*                                                                                    !! 1144 int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1324  * Return true if the vma needs to copy the pgtable during this fork().  Return       !! 1145                 struct vm_area_struct *vma)
1325  * false when we can speed up fork() by allowing lazy page faults later until         <<
1326  * when the child accesses the memory range.                                          <<
1327  */                                                                                   <<
1328 static bool                                                                           <<
1329 vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)        <<
1330 {                                                                                     <<
1331         /*                                                                            <<
1332          * Always copy pgtables when dst_vma has uffd-wp enabled even if it's         <<
1333          * file-backed (e.g. shmem). Because when uffd-wp is enabled, pgtable         <<
1334          * contains uffd-wp protection information, that's something we can't         <<
1335          * retrieve from page cache, and skip copying will lose those info.           <<
1336          */                                                                           <<
1337         if (userfaultfd_wp(dst_vma))                                                  <<
1338                 return true;                                                          <<
1339                                                                                       <<
1340         if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))                            <<
1341                 return true;                                                          <<
1342                                                                                       <<
1343         if (src_vma->anon_vma)                                                        <<
1344                 return true;                                                          <<
1345                                                                                       <<
1346         /*                                                                            <<
1347          * Don't copy ptes where a page fault will fill them correctly.  Fork         <<
1348          * becomes much lighter when there are big shared or private readonly         <<
1349          * mappings. The tradeoff is that copy_page_range is more efficient           <<
1350          * than faulting.                                                             <<
1351          */                                                                           <<
1352         return false;                                                                 <<
1353 }                                                                                     <<
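
Returning false here changes performance, not semantics: the child simply takes minor faults later. The two interesting cases can be probed from userspace -- a shared anonymous mapping (no anon_vma, refilled from the page cache, so its PTEs are skippable) versus private anonymous memory that has been written (has an anon_vma, so it must be copied and COW-protected). A small demo, assuming Linux/glibc:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        /* Shared: parent and child see one page; fork need not copy PTEs. */
        char *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                            MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        /* Private anon, already written: must be copied with COW. */
        char *priv = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (shared == MAP_FAILED || priv == MAP_FAILED)
                return 1;
        strcpy(priv, "parent");

        if (fork() == 0) {
                strcpy(shared, "from child");   /* visible to the parent */
                strcpy(priv, "from child");     /* lands in the child's copy */
                _exit(0);
        }
        wait(NULL);
        printf("shared: \"%s\"  private: \"%s\"\n", shared, priv);
        /* Expect: shared: "from child"  private: "parent" */
        return 0;
}
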
1354                                               << 
1355 int                                           << 
1356 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)       <<
1357 {                                                1146 {
1358         pgd_t *src_pgd, *dst_pgd;                1147         pgd_t *src_pgd, *dst_pgd;
1359         unsigned long next;                      1148         unsigned long next;
1360         unsigned long addr = src_vma->vm_start;                                       !! 1149         unsigned long addr = vma->vm_start;
1361         unsigned long end = src_vma->vm_end;                                          !! 1150         unsigned long end = vma->vm_end;
1362         struct mm_struct *dst_mm = dst_vma->vm_mm;                                    !! 1151         unsigned long mmun_start;       /* For mmu_notifiers */
1363         struct mm_struct *src_mm = src_vma->vm_mm;                                    !! 1152         unsigned long mmun_end;         /* For mmu_notifiers */
1364         struct mmu_notifier_range range;      << 
1365         bool is_cow;                             1153         bool is_cow;
1366         int ret;                                 1154         int ret;
1367                                                  1155 
1368         if (!vma_needs_copy(dst_vma, src_vma))                                        !! 1156         /*
                                                   >> 1157          * Don't copy ptes where a page fault will fill them correctly.
                                                   >> 1158          * Fork becomes much lighter when there are big shared or private
                                                   >> 1159          * readonly mappings. The tradeoff is that copy_page_range is more
                                                   >> 1160          * efficient than faulting.
                                                   >> 1161          */
                                                   >> 1162         if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
                                                   >> 1163                         !vma->anon_vma)
1369                 return 0;                        1164                 return 0;
1370                                                  1165 
1371         if (is_vm_hugetlb_page(src_vma))      !! 1166         if (is_vm_hugetlb_page(vma))
1372                 return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);     !! 1167                 return copy_hugetlb_page_range(dst_mm, src_mm, vma);
1373                                                  1168 
1374         if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {                                !! 1169         if (unlikely(vma->vm_flags & VM_PFNMAP)) {
1375                 /*                               1170                 /*
1376                  * We do not free on error cases below as remove_vma                     1171                  * We do not free on error cases below as remove_vma
1377                  * gets called on error from higher level routine                        1172                  * gets called on error from higher level routine
1378                  */                              1173                  */
1379                 ret = track_pfn_copy(src_vma);                                        !! 1174                 ret = track_pfn_copy(vma);
1380                 if (ret)                         1175                 if (ret)
1381                         return ret;              1176                         return ret;
1382         }                                        1177         }
1383                                                  1178 
1384         /*                                       1179         /*
1385          * We need to invalidate the secondary MMU mappings only when                    1180          * We need to invalidate the secondary MMU mappings only when
1386          * there could be a permission downgrade on the ptes of the                      1181          * there could be a permission downgrade on the ptes of the
1387          * parent mm. And a permission downgrade will only happen if                     1182          * parent mm. And a permission downgrade will only happen if
1388          * is_cow_mapping() returns true.        1183          * is_cow_mapping() returns true.
1389          */                                      1184          */
1390         is_cow = is_cow_mapping(src_vma->vm_flags);                                   !! 1185         is_cow = is_cow_mapping(vma->vm_flags);
1391                                                                                       !! 1186         mmun_start = addr;
1392         if (is_cow) {                                                                 !! 1187         mmun_end   = end;
1393                 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,           !! 1188         if (is_cow)
1394                                         0, src_mm, addr, end);                        !! 1189                 mmu_notifier_invalidate_range_start(src_mm, mmun_start,
1395                 mmu_notifier_invalidate_range_start(&range);                          !! 1190                                                     mmun_end);
1396                 /*                                                                    <<
1397                  * Disabling preemption is not needed for the write side, as          <<
1398                  * the read side doesn't spin, but goes to the mmap_lock.             <<
1399                  *                                                                    <<
1400                  * Use the raw variant of the seqcount_t write API to avoid           <<
1401                  * lockdep complaining about preemptibility.                          <<
1402                  */                                                                   <<
1403                 vma_assert_write_locked(src_vma);                                     <<
1404                 raw_write_seqcount_begin(&src_mm->write_protect_seq);                 <<
1405         }                                                                             <<
1406                                                  1191 
1407         ret = 0;                                 1192         ret = 0;
1408         dst_pgd = pgd_offset(dst_mm, addr);      1193         dst_pgd = pgd_offset(dst_mm, addr);
1409         src_pgd = pgd_offset(src_mm, addr);      1194         src_pgd = pgd_offset(src_mm, addr);
1410         do {                                     1195         do {
1411                 next = pgd_addr_end(addr, end    1196                 next = pgd_addr_end(addr, end);
1412                 if (pgd_none_or_clear_bad(src    1197                 if (pgd_none_or_clear_bad(src_pgd))
1413                         continue;                1198                         continue;
1414                 if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,       !! 1199                 if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
1415                                             addr, next))) {                           !! 1200                                             vma, addr, next))) {
1416                         untrack_pfn_clear(dst_vma);                                   <<
1417                         ret = -ENOMEM;           1201                         ret = -ENOMEM;
1418                         break;                   1202                         break;
1419                 }                                1203                 }
1420         } while (dst_pgd++, src_pgd++, addr =    1204         } while (dst_pgd++, src_pgd++, addr = next, addr != end);
1421                                                  1205 
1422         if (is_cow) {                         !! 1206         if (is_cow)
1423                 raw_write_seqcount_end(&src_mm->write_protect_seq);                   !! 1207                 mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end);
1424                 mmu_notifier_invalidate_range_end(&range);                            <<
1425         }                                     << 
1426         return ret;                              1208         return ret;
1427 }                                                1209 }
1428                                                  1210 
1429 /* Whether we should zap all COWed (private) pages too */                             <<
1430 static inline bool should_zap_cows(struct zap_details *details)                       <<
1431 {                                                                                     <<
1432         /* By default, zap all pages */                                               <<
1433         if (!details)                                                                 <<
1434                 return true;                                                          <<
1435                                                                                       <<
1436         /* Or, we zap COWed pages only if the caller wants to */                      <<
1437         return details->even_cows;                                                    <<
1438 }                                                                                     <<
1439                                                                                       <<
1440 /* Decides whether we should zap this folio with the folio pointer specified */       <<
1441 static inline bool should_zap_folio(struct zap_details *details,                      <<
1442                                     struct folio *folio)                              <<
1443 {                                                                                     <<
1444         /* If we can make a decision without *folio.. */                              <<
1445         if (should_zap_cows(details))                                                 <<
1446                 return true;                                                          <<
1447                                                                                       <<
1448         /* Otherwise we should only zap non-anon folios */                            <<
1449         return !folio_test_anon(folio);                                               <<
1450 }                                                                                     <<
1451                                                                                       <<
1452 static inline bool zap_drop_file_uffd_wp(struct zap_details *details)                 <<
1453 {                                                                                     <<
1454         if (!details)                                                                 <<
1455                 return false;                                                         <<
1456                                                                                       <<
1457         return details->zap_flags & ZAP_FLAG_DROP_MARKER;                             <<
1458 }                                                                                     <<
1459                                               << 
1460 /*                                                                                    <<
1461  * This function makes sure that we'll replace the none pte with an uffd-wp           <<
1462  * swap special pte marker when necessary. Must be with pgtable lock held.            <<
1463  */                                                                                   <<
1464 static inline void                                                                    <<
1465 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,                             <<
1466                               unsigned long addr, pte_t *pte, int nr,                <<
1467                               struct zap_details *details, pte_t pteval)             <<
1468 {                                                                                     <<
1469         /* Zap on anonymous always means dropping everything */                       <<
1470         if (vma_is_anonymous(vma))                                                    <<
1471                 return;                                                               <<
1472                                                                                       <<
1473         if (zap_drop_file_uffd_wp(details))                                           <<
1474                 return;                                                               <<
1475                                                                                       <<
1476         for (;;) {                                                                    <<
1477                 /* the PFN in the PTE is irrelevant. */                               <<
1478                 pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);                <<
1479                 if (--nr == 0)                                                        <<
1480                         break;                                                        <<
1481                 pte++;                                                                <<
1482                 addr += PAGE_SIZE;                                                    <<
1483         }                                                                             <<
1484 }                                                                                     <<
1485                                               << 
1486 static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,            <<
1487                 struct vm_area_struct *vma, struct folio *folio,                      <<
1488                 struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,          <<
1489                 unsigned long addr, struct zap_details *details, int *rss,            <<
1490                 bool *force_flush, bool *force_break)                                 <<
1491 {                                                                                     <<
1492         struct mm_struct *mm = tlb->mm;                                               <<
1493         bool delay_rmap = false;                                                      <<
1494                                                                                       <<
1495         if (!folio_test_anon(folio)) {                                                <<
1496                 ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);      <<
1497                 if (pte_dirty(ptent)) {                                               <<
1498                         folio_mark_dirty(folio);                                      <<
1499                         if (tlb_delay_rmap(tlb)) {                                    <<
1500                                 delay_rmap = true;                                    <<
1501                                 *force_flush = true;                                  <<
1502                         }                                                             <<
1503                 }                                                                     <<
1504                 if (pte_young(ptent) && likely(!(vma->vm_flags & VM_SEQ_READ)))       <<
1505                         folio_mark_accessed(folio);                                   <<
1506                 rss[mm_counter(folio)] -= nr;                                         <<
1507         } else {                                                                      <<
1508                 /* We don't need up-to-date accessed/dirty bits. */                   <<
1509                 clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);                      <<
1510                 rss[MM_ANONPAGES] -= nr;                                              <<
1511         }                                                                             <<
1512         /* Checking a single PTE in a batch is sufficient. */                         <<
1513         arch_check_zapped_pte(vma, ptent);                                            <<
1514         tlb_remove_tlb_entries(tlb, pte, nr, addr);                                   <<
1515         if (unlikely(userfaultfd_pte_wp(vma, ptent)))                                 <<
1516                 zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details,            <<
1517                                               ptent);                                 <<
1518                                                                                       <<
1519         if (!delay_rmap) {                                                            <<
1520                 folio_remove_rmap_ptes(folio, page, nr, vma);                         <<
1521                                                                                       <<
1522                 if (unlikely(folio_mapcount(folio) < 0))                              <<
1523                         print_bad_pte(vma, addr, ptent, page);                        <<
1524         }                                                                             <<
1525         if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) {          <<
1526                 *force_flush = true;                                                  <<
1527                 *force_break = true;                                                  <<
1528         }                                                                             <<
1529 }                                                                                     <<
1530                                               << 
1531 /*                                                                                    <<
1532  * Zap or skip at least one present PTE, trying to batch-process subsequent           <<
1533  * PTEs that map consecutive pages of the same folio.                                 <<
1534  *                                                                                    <<
1535  * Returns the number of processed (skipped or zapped) PTEs (at least 1).             <<
1536  */                                                                                   <<
1537 static inline int zap_present_ptes(struct mmu_gather *tlb,                            <<
1538                 struct vm_area_struct *vma, pte_t *pte, pte_t ptent,                  <<
1539                 unsigned int max_nr, unsigned long addr,                              <<
1540                 struct zap_details *details, int *rss, bool *force_flush,             <<
1541                 bool *force_break)                                                    <<
1542 {                                                                                     <<
1543         const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;             <<
1544         struct mm_struct *mm = tlb->mm;                                               <<
1545         struct folio *folio;                                                          <<
1546         struct page *page;                                                            <<
1547         int nr;                                                                       <<
1548                                                                                       <<
1549         page = vm_normal_page(vma, addr, ptent);                                      <<
1550         if (!page) {                                                                  <<
1551                 /* We don't need up-to-date accessed/dirty bits. */                   <<
1552                 ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);                  <<
1553                 arch_check_zapped_pte(vma, ptent);                                    <<
1554                 tlb_remove_tlb_entry(tlb, pte, addr);                                 <<
1555                 if (userfaultfd_pte_wp(vma, ptent))                                   <<
1556                         zap_install_uffd_wp_if_needed(vma, addr, pte, 1,              <<
1557                                                       details, ptent);                <<
1558                 ksm_might_unmap_zero_page(mm, ptent);                                 <<
1559                 return 1;                                                             <<
1560         }                                                                             <<
1561                                                                                       <<
1562         folio = page_folio(page);                                                     <<
1563         if (unlikely(!should_zap_folio(details, folio)))                              <<
1564                 return 1;                                                             <<
1565                                                                                       <<
1566         /*                                                                            <<
1567          * Make sure that the common "small folio" case is as fast as possible        <<
1568          * by keeping the batching logic separate.                                    <<
1569          */                                                                           <<
1570         if (unlikely(folio_test_large(folio) && max_nr != 1)) {                       <<
1571                 nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags,      <<
1572                                      NULL, NULL, NULL);                               <<
1573                                                                                       <<
1574                 zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,         <<
1575                                        addr, details, rss, force_flush,               <<
1576                                        force_break);                                  <<
1577                 return nr;                                                            <<
1578         }                                                                             <<
1579         zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,            <<
1580                                details, rss, force_flush, force_break);               <<
1581         return 1;                                                                     <<
1582 }                                                                                     <<
1583                                               << 
1584 static unsigned long zap_pte_range(struct mmu_gather *tlb,                               1211 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1585                                 struct vm_area_struct *vma, pmd_t *pmd,                  1212                                 struct vm_area_struct *vma, pmd_t *pmd,
1586                                 unsigned long addr, unsigned long end,                   1213                                 unsigned long addr, unsigned long end,
1587                                 struct zap_details *details)                             1214                                 struct zap_details *details)
1588 {                                                1215 {
1589         bool force_flush = false, force_break = false;                                <<
1590         struct mm_struct *mm = tlb->mm;          1216         struct mm_struct *mm = tlb->mm;
                                                   >> 1217         int force_flush = 0;
1591         int rss[NR_MM_COUNTERS];                 1218         int rss[NR_MM_COUNTERS];
1592         spinlock_t *ptl;                         1219         spinlock_t *ptl;
1593         pte_t *start_pte;                        1220         pte_t *start_pte;
1594         pte_t *pte;                              1221         pte_t *pte;
1595         swp_entry_t entry;                       1222         swp_entry_t entry;
1596         int nr;                               << 
1597                                                  1223 
1598         tlb_change_page_size(tlb, PAGE_SIZE); !! 1224         tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
                                                   >> 1225 again:
1599         init_rss_vec(rss);                       1226         init_rss_vec(rss);
1600         start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);                   !! 1227         start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1601         if (!pte)                             !! 1228         pte = start_pte;
1602                 return addr;                  << 
1603                                               << 
1604         flush_tlb_batched_pending(mm);           1229         flush_tlb_batched_pending(mm);
1605         arch_enter_lazy_mmu_mode();              1230         arch_enter_lazy_mmu_mode();
1606         do {                                     1231         do {
1607                 pte_t ptent = ptep_get(pte);  !! 1232                 pte_t ptent = *pte;
1608                 struct folio *folio;          << 
1609                 struct page *page;            << 
1610                 int max_nr;                   << 
1611                                               << 
1612                 nr = 1;                       << 
1613                 if (pte_none(ptent))             1233                 if (pte_none(ptent))
1614                         continue;                1234                         continue;
1615                                                  1235 
1616                 if (need_resched())           << 
1617                         break;                << 
1618                                               << 
1619                 if (pte_present(ptent)) {        1236                 if (pte_present(ptent)) {
1620                         max_nr = (end - addr) / PAGE_SIZE;                            !! 1237                         struct page *page;
1621                         nr = zap_present_ptes(tlb, vma, pte, ptent, max_nr,           !! 1238 
1622                                               addr, details, rss, &force_flush,       !! 1239                         page = vm_normal_page(vma, addr, ptent);
1623                                               &force_break);                          !! 1240                         if (unlikely(details) && page) {
1624                         if (unlikely(force_break)) {                                  !! 1241                                 /*
1625                                 addr += nr * PAGE_SIZE;                               !! 1242                                  * unmap_shared_mapping_pages() wants to
                                                   >> 1243                                  * invalidate cache without truncating:
                                                   >> 1244                                  * unmap shared but keep private pages.
                                                   >> 1245                                  */
                                                   >> 1246                                 if (details->check_mapping &&
                                                   >> 1247                                     details->check_mapping != page_rmapping(page))
                                                   >> 1248                                         continue;
                                                   >> 1249                         }
                                                   >> 1250                         ptent = ptep_get_and_clear_full(mm, addr, pte,
                                                   >> 1251                                                         tlb->fullmm);
                                                   >> 1252                         tlb_remove_tlb_entry(tlb, pte, addr);
                                                   >> 1253                         if (unlikely(!page))
                                                   >> 1254                                 continue;
                                                   >> 1255 
                                                   >> 1256                         if (!PageAnon(page)) {
                                                   >> 1257                                 if (pte_dirty(ptent)) {
                                                   >> 1258                                         force_flush = 1;
                                                   >> 1259                                         set_page_dirty(page);
                                                   >> 1260                                 }
                                                   >> 1261                                 if (pte_young(ptent) &&
                                                   >> 1262                                     likely(!(vma->vm_flags & VM_SEQ_READ)))
                                                   >> 1263                                         mark_page_accessed(page);
                                                   >> 1264                         }
                                                   >> 1265                         rss[mm_counter(page)]--;
                                                   >> 1266                         page_remove_rmap(page, false);
                                                   >> 1267                         if (unlikely(page_mapcount(page) < 0))
                                                   >> 1268                                 print_bad_pte(vma, addr, ptent, page);
                                                   >> 1269                         if (unlikely(__tlb_remove_page(tlb, page))) {
                                                   >> 1270                                 force_flush = 1;
                                                   >> 1271                                 addr += PAGE_SIZE;
1626                                 break;           1272                                 break;
1627                         }                        1273                         }
1628                         continue;                1274                         continue;
1629                 }                                1275                 }
                                                   >> 1276                 /* If details->check_mapping, we leave swap entries. */
                                                   >> 1277                 if (unlikely(details))
                                                   >> 1278                         continue;
1630                                                  1279 
1631                 entry = pte_to_swp_entry(ptent);                                         1280                 entry = pte_to_swp_entry(ptent);
1632                 if (is_device_private_entry(entry) ||                                 !! 1281                 if (!non_swap_entry(entry))
1633                     is_device_exclusive_entry(entry)) {                               !! 1282                         rss[MM_SWAPENTS]--;
1634                         page = pfn_swap_entry_to_page(entry);                         !! 1283                 else if (is_migration_entry(entry)) {
1635                         folio = page_folio(page);                                     !! 1284                         struct page *page;
1636                         if (unlikely(!should_zap_folio(details, folio)))              !! 1285 
1637                                 continue;                                             !! 1286                         page = migration_entry_to_page(entry);
1638                         /*                                                            !! 1287                         rss[mm_counter(page)]--;
1639                          * Both device private/exclusive mappings should only         !! 1288                 }
1640                          * work with anonymous page so far, so we don't need to       !! 1289                 if (unlikely(!free_swap_and_cache(entry)))
1641                          * consider uffd-wp bit when zap. For more information,       !! 1290                         print_bad_pte(vma, addr, ptent, NULL);
1642                          * see zap_install_uffd_wp_if_needed().                       !! 1291                 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1643                          */                                                           !! 1292         } while (pte++, addr += PAGE_SIZE, addr != end);
1644                         WARN_ON_ONCE(!vma_is_anonymous(vma));                         <<
1645                         rss[mm_counter(folio)]--;                                     <<
1646                         if (is_device_private_entry(entry))                           <<
1647                                 folio_remove_rmap_pte(folio, page, vma);              <<
1648                         folio_put(folio);                                             <<
1649                 } else if (!non_swap_entry(entry)) {                                  <<
1650                         max_nr = (end - addr) / PAGE_SIZE;                            <<
1651                         nr = swap_pte_batch(pte, max_nr, ptent);                      <<
1652                         /* Genuine swap entries, hence a private anon pages */        <<
1653                         if (!should_zap_cows(details))                                <<
1654                                 continue;                                             <<
1655                         rss[MM_SWAPENTS] -= nr;                                       <<
1656                         free_swap_and_cache_nr(entry, nr);                            <<
1657                 } else if (is_migration_entry(entry)) {                               <<
1658                         folio = pfn_swap_entry_folio(entry);                          <<
1659                         if (!should_zap_folio(details, folio))                        <<
1660                                 continue;                                             <<
1661                         rss[mm_counter(folio)]--;                                     <<
1662                 } else if (pte_marker_entry_uffd_wp(entry)) {                         <<
1663                         /*                                                            <<
1664                          * For anon: always drop the marker; for file: only           <<
1665                          * drop the marker if explicitly requested.                   <<
1666                          */                                                           <<
1667                         if (!vma_is_anonymous(vma) &&                                 <<
1668                             !zap_drop_file_uffd_wp(details))                          <<
1669                                 continue;                                             <<
1670                 } else if (is_hwpoison_entry(entry) ||                                <<
1671                            is_poisoned_swp_entry(entry)) {                            <<
1672                         if (!should_zap_cows(details))                                <<
1673                                 continue;                                             <<
1674                 } else {                                                              <<
1675                         /* We should have covered all the swap entry types */         <<
1676                         pr_alert("unrecognized swap entry 0x%lx\n", entry.val);       <<
1677                         WARN_ON_ONCE(1);                                              <<
1678                 }                                                                     <<
1679                 clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm);          <<
1680                 zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);    <<
1681         } while (pte += nr, addr += PAGE_SIZE * nr, addr != end);                     <<
1682                                                  1293 
1683         add_mm_rss_vec(mm, rss);                 1294         add_mm_rss_vec(mm, rss);
1684         arch_leave_lazy_mmu_mode();              1295         arch_leave_lazy_mmu_mode();
1685                                                  1296 
1686         /* Do the actual TLB flush before dro    1297         /* Do the actual TLB flush before dropping ptl */
1687         if (force_flush) {                    !! 1298         if (force_flush)
1688                 tlb_flush_mmu_tlbonly(tlb);      1299                 tlb_flush_mmu_tlbonly(tlb);
1689                 tlb_flush_rmaps(tlb, vma);    << 
1690         }                                     << 
1691         pte_unmap_unlock(start_pte, ptl);        1300         pte_unmap_unlock(start_pte, ptl);
1692                                                  1301 
1693         /*                                       1302         /*
1694          * If we forced a TLB flush (either due to running out of                        1303          * If we forced a TLB flush (either due to running out of
1695          * batch buffers or because we needed to flush dirty TLB                         1304          * batch buffers or because we needed to flush dirty TLB
1696          * entries before releasing the ptl), free the batched                           1305          * entries before releasing the ptl), free the batched
1697          * memory too. Come back again if we didn't do everything.                    !! 1306          * memory too. Restart if we didn't do everything.
1698          */                                      1307          */
1699         if (force_flush)                      !! 1308         if (force_flush) {
1700                 tlb_flush_mmu(tlb);           !! 1309                 force_flush = 0;
                                                   >> 1310                 tlb_flush_mmu_free(tlb);
                                                   >> 1311                 if (addr != end)
                                                   >> 1312                         goto again;
                                                   >> 1313         }
1701                                                  1314 
1702         return addr;                             1315         return addr;
1703 }                                                1316 }
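
zap_pte_range() is, for example, what an MADV_DONTNEED ends up running: present PTEs are cleared in batches, the anon pages freed, and the next touch of the range faults in fresh zero-filled pages. A short Linux-specific demonstration:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        strcpy(p, "hello");
        printf("before: \"%s\"\n", p);

        /* Discard the range: the kernel zaps its PTEs and frees the page. */
        if (madvise(p, 4096, MADV_DONTNEED))
                return 1;

        /* Old contents are gone; this read faults in a zero page. */
        printf("after:  first byte = %d\n", p[0]);
        return 0;
}
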
1704                                                  1317 
1705 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,                       1318 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1706                                 struct vm_area_struct *vma, pud_t *pud,                 1319                                 struct vm_area_struct *vma, pud_t *pud,
1707                                 unsigned long addr, unsigned long end,                  1320                                 unsigned long addr, unsigned long end,
1708                                 struct zap_details *details)                            1321                                 struct zap_details *details)
1709 {                                                1322 {
1710         pmd_t *pmd;                              1323         pmd_t *pmd;
1711         unsigned long next;                      1324         unsigned long next;
1712                                                  1325 
1713         pmd = pmd_offset(pud, addr);             1326         pmd = pmd_offset(pud, addr);
1714         do {                                     1327         do {
1715                 next = pmd_addr_end(addr, end    1328                 next = pmd_addr_end(addr, end);
1716                 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {  !! 1329                 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1717                         if (next - addr != HPAGE_PMD_SIZE)                            !! 1330                         if (next - addr != HPAGE_PMD_SIZE) {
                                                   >> 1331                                 VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
                                                   >> 1332                                     !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
1718                                 __split_huge_pmd(vma, pmd, addr, false, NULL);           1333                                 __split_huge_pmd(vma, pmd, addr, false, NULL);
1719                         else if (zap_huge_pmd(tlb, vma, pmd, addr)) {                 !! 1334                         } else if (zap_huge_pmd(tlb, vma, pmd, addr))
1720                                 addr = next;  !! 1335                                 goto next;
1721                                 continue;     << 
1722                         }                     << 
1723                         /* fall through */       1336                         /* fall through */
1724                 } else if (details && details->single_folio &&                        <<
1725                            folio_test_pmd_mappable(details->single_folio) &&          <<
1726                            next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {         <<
1727                         spinlock_t *ptl = pmd_lock(tlb->mm, pmd);                     <<
1728                         /*                                                            <<
1729                          * Take and drop THP pmd lock so that we cannot return        <<
1730                          * prematurely, while zap_huge_pmd() has cleared *pmd,        <<
1731                          * but not yet decremented compound_mapcount().               <<
1732                          */                                                           <<
1733                         spin_unlock(ptl);                                             <<
1734                 }                             << 
1735                 if (pmd_none(*pmd)) {         << 
1736                         addr = next;          << 
1737                         continue;             << 
1738                 }                                1337                 }
1739                 addr = zap_pte_range(tlb, vma, pmd, addr, next, details);             !! 1338                 /*
1740                 if (addr != next)             !! 1339                  * Here there can be other concurrent MADV_DONTNEED or
1741                         pmd--;                !! 1340                  * trans huge page faults running, and if the pmd is
1742         } while (pmd++, cond_resched(), addr != end);                                 !! 1341                  * none or trans huge it can change under us. This is
                                                   >> 1342                  * because MADV_DONTNEED holds the mmap_sem in read
                                                   >> 1343                  * mode.
                                                   >> 1344                  */
                                                   >> 1345                 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                                                   >> 1346                         goto next;
                                                   >> 1347                 next = zap_pte_range(tlb, vma, pmd, addr, next, details);
                                                   >> 1348 next:
                                                   >> 1349                 cond_resched();
                                                   >> 1350         } while (pmd++, addr = next, addr != end);
1743                                                  1351 
1744         return addr;                             1352         return addr;
1745 }                                                1353 }
1746                                                  1354 
1747 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,                       1355 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1748                                 struct vm_area_struct *vma, p4d_t *p4d,                 1356                                 struct vm_area_struct *vma, p4d_t *p4d,
1749                                 unsigned long addr, unsigned long end,                  1357                                 unsigned long addr, unsigned long end,
1750                                 struct zap_details *details)                            1358                                 struct zap_details *details)
1751 {                                                1359 {
1752         pud_t *pud;                              1360         pud_t *pud;
1753         unsigned long next;                      1361         unsigned long next;
1754                                                  1362 
1755         pud = pud_offset(p4d, addr);             1363         pud = pud_offset(p4d, addr);
1756         do {                                     1364         do {
1757                 next = pud_addr_end(addr, end    1365                 next = pud_addr_end(addr, end);
1758                 if (pud_trans_huge(*pud) || p    1366                 if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1759                         if (next - addr != HP    1367                         if (next - addr != HPAGE_PUD_SIZE) {
1760                                 mmap_assert_l !! 1368                                 VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
1761                                 split_huge_pu    1369                                 split_huge_pud(vma, pud, addr);
1762                         } else if (zap_huge_p    1370                         } else if (zap_huge_pud(tlb, vma, pud, addr))
1763                                 goto next;       1371                                 goto next;
1764                         /* fall through */       1372                         /* fall through */
1765                 }                                1373                 }
1766                 if (pud_none_or_clear_bad(pud    1374                 if (pud_none_or_clear_bad(pud))
1767                         continue;                1375                         continue;
1768                 next = zap_pmd_range(tlb, vma    1376                 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1769 next:                                            1377 next:
1770                 cond_resched();                  1378                 cond_resched();
1771         } while (pud++, addr = next, addr !=     1379         } while (pud++, addr = next, addr != end);
1772                                                  1380 
1773         return addr;                             1381         return addr;
1774 }                                                1382 }
1775                                                  1383 
1776 static inline unsigned long zap_p4d_range(str    1384 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1777                                 struct vm_are    1385                                 struct vm_area_struct *vma, pgd_t *pgd,
1778                                 unsigned long    1386                                 unsigned long addr, unsigned long end,
1779                                 struct zap_de    1387                                 struct zap_details *details)
1780 {                                                1388 {
1781         p4d_t *p4d;                              1389         p4d_t *p4d;
1782         unsigned long next;                      1390         unsigned long next;
1783                                                  1391 
1784         p4d = p4d_offset(pgd, addr);             1392         p4d = p4d_offset(pgd, addr);
1785         do {                                     1393         do {
1786                 next = p4d_addr_end(addr, end    1394                 next = p4d_addr_end(addr, end);
1787                 if (p4d_none_or_clear_bad(p4d    1395                 if (p4d_none_or_clear_bad(p4d))
1788                         continue;                1396                         continue;
1789                 next = zap_pud_range(tlb, vma    1397                 next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1790         } while (p4d++, addr = next, addr !=     1398         } while (p4d++, addr = next, addr != end);
1791                                                  1399 
1792         return addr;                             1400         return addr;
1793 }                                                1401 }
1794                                                  1402 
1795 void unmap_page_range(struct mmu_gather *tlb,    1403 void unmap_page_range(struct mmu_gather *tlb,
1796                              struct vm_area_s    1404                              struct vm_area_struct *vma,
1797                              unsigned long ad    1405                              unsigned long addr, unsigned long end,
1798                              struct zap_detai    1406                              struct zap_details *details)
1799 {                                                1407 {
1800         pgd_t *pgd;                              1408         pgd_t *pgd;
1801         unsigned long next;                      1409         unsigned long next;
1802                                                  1410 
1803         BUG_ON(addr >= end);                     1411         BUG_ON(addr >= end);
1804         tlb_start_vma(tlb, vma);                 1412         tlb_start_vma(tlb, vma);
1805         pgd = pgd_offset(vma->vm_mm, addr);      1413         pgd = pgd_offset(vma->vm_mm, addr);
1806         do {                                     1414         do {
1807                 next = pgd_addr_end(addr, end    1415                 next = pgd_addr_end(addr, end);
1808                 if (pgd_none_or_clear_bad(pgd    1416                 if (pgd_none_or_clear_bad(pgd))
1809                         continue;                1417                         continue;
1810                 next = zap_p4d_range(tlb, vma    1418                 next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1811         } while (pgd++, addr = next, addr !=     1419         } while (pgd++, addr = next, addr != end);
1812         tlb_end_vma(tlb, vma);                   1420         tlb_end_vma(tlb, vma);
1813 }                                                1421 }
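The zap_*_range() helpers above and unmap_page_range() compose the classic top-down page-table walk: each level clips next to its own table boundary with *_addr_end(), skips empty entries with *_none_or_clear_bad(), and descends one level. A minimal hedged sketch of the leaf step of that shape, merely counting present entries rather than zapping them; count_present_ptes() is a hypothetical name, and ptep_get() plus the NULL return of pte_offset_map_lock() assume a recent kernel:

#include <linux/mm.h>
#include <linux/pgtable.h>

static unsigned long count_present_ptes(struct mm_struct *mm, pmd_t *pmd,
                                        unsigned long addr, unsigned long end)
{
        unsigned long mapped = 0;
        spinlock_t *ptl;
        pte_t *start_pte, *pte;

        /* The page table may disappear under us; bail out if so. */
        start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        if (!start_pte)
                return 0;
        for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
                if (!pte_none(ptep_get(pte)))
                        mapped++;
        }
        pte_unmap_unlock(start_pte, ptl);
        return mapped;
}

A caller would clip [addr, end) with pmd_addr_end() per iteration, exactly as zap_pmd_range() does before calling zap_pte_range().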
1814                                                  1422 
1815                                                  1423 
1816 static void unmap_single_vma(struct mmu_gathe    1424 static void unmap_single_vma(struct mmu_gather *tlb,
1817                 struct vm_area_struct *vma, u    1425                 struct vm_area_struct *vma, unsigned long start_addr,
1818                 unsigned long end_addr,          1426                 unsigned long end_addr,
1819                 struct zap_details *details,  !! 1427                 struct zap_details *details)
1820 {                                                1428 {
1821         unsigned long start = max(vma->vm_sta    1429         unsigned long start = max(vma->vm_start, start_addr);
1822         unsigned long end;                       1430         unsigned long end;
1823                                                  1431 
1824         if (start >= vma->vm_end)                1432         if (start >= vma->vm_end)
1825                 return;                          1433                 return;
1826         end = min(vma->vm_end, end_addr);        1434         end = min(vma->vm_end, end_addr);
1827         if (end <= vma->vm_start)                1435         if (end <= vma->vm_start)
1828                 return;                          1436                 return;
1829                                                  1437 
1830         if (vma->vm_file)                        1438         if (vma->vm_file)
1831                 uprobe_munmap(vma, start, end    1439                 uprobe_munmap(vma, start, end);
1832                                                  1440 
1833         if (unlikely(vma->vm_flags & VM_PFNMA    1441         if (unlikely(vma->vm_flags & VM_PFNMAP))
1834                 untrack_pfn(vma, 0, 0, mm_wr_ !! 1442                 untrack_pfn(vma, 0, 0);
1835                                                  1443 
1836         if (start != end) {                      1444         if (start != end) {
1837                 if (unlikely(is_vm_hugetlb_pa    1445                 if (unlikely(is_vm_hugetlb_page(vma))) {
1838                         /*                       1446                         /*
1839                          * It is undesirable     1447                          * It is undesirable to test vma->vm_file as it
1840                          * should be non-null    1448  * should be non-null for a valid hugetlb area.
1841                          * However, vm_file w    1449                          * However, vm_file will be NULL in the error
1842                          * cleanup path of mm    1450                          * cleanup path of mmap_region. When
1843                          * hugetlbfs ->mmap m    1451                          * hugetlbfs ->mmap method fails,
1844                          * mmap_region() null    1452                          * mmap_region() nullifies vma->vm_file
1845                          * before calling thi    1453                          * before calling this function to clean up.
1846                          * Since no pte has a    1454                          * Since no pte has actually been setup, it is
1847                          * safe to do nothing    1455                          * safe to do nothing in this case.
1848                          */                      1456                          */
1849                         if (vma->vm_file) {      1457                         if (vma->vm_file) {
1850                                 zap_flags_t z !! 1458                                 i_mmap_lock_write(vma->vm_file->f_mapping);
1851                                     details-> !! 1459                                 __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
1852                                 __unmap_hugep !! 1460                                 i_mmap_unlock_write(vma->vm_file->f_mapping);
1853                                               << 
1854                         }                        1461                         }
1855                 } else                           1462                 } else
1856                         unmap_page_range(tlb,    1463                         unmap_page_range(tlb, vma, start, end, details);
1857         }                                        1464         }
1858 }                                                1465 }
1859                                                  1466 
1860 /**                                              1467 /**
1861  * unmap_vmas - unmap a range of memory cover    1468  * unmap_vmas - unmap a range of memory covered by a list of vma's
1862  * @tlb: address of the caller's struct mmu_g    1469  * @tlb: address of the caller's struct mmu_gather
1863  * @mas: the maple state                      << 
1864  * @vma: the starting vma                        1470  * @vma: the starting vma
1865  * @start_addr: virtual address at which to s    1471  * @start_addr: virtual address at which to start unmapping
1866  * @end_addr: virtual address at which to end    1472  * @end_addr: virtual address at which to end unmapping
1867  * @tree_end: The maximum index to check      << 
1868  * @mm_wr_locked: lock flag                   << 
1869  *                                               1473  *
1870  * Unmap all pages in the vma list.              1474  * Unmap all pages in the vma list.
1871  *                                               1475  *
1872  * Only addresses between `start' and `end' w    1476  * Only addresses between `start' and `end' will be unmapped.
1873  *                                               1477  *
1874  * The VMA list must be sorted in ascending v    1478  * The VMA list must be sorted in ascending virtual address order.
1875  *                                               1479  *
1876  * unmap_vmas() assumes that the caller will     1480  * unmap_vmas() assumes that the caller will flush the whole unmapped address
1877  * range after unmap_vmas() returns.  So the     1481  * range after unmap_vmas() returns.  So the only responsibility here is to
1878  * ensure that any thus-far unmapped pages ar    1482  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1879  * drops the lock and schedules.                 1483  * drops the lock and schedules.
1880  */                                              1484  */
1881 void unmap_vmas(struct mmu_gather *tlb, struc !! 1485 void unmap_vmas(struct mmu_gather *tlb,
1882                 struct vm_area_struct *vma, u    1486                 struct vm_area_struct *vma, unsigned long start_addr,
1883                 unsigned long end_addr, unsig !! 1487                 unsigned long end_addr)
1884                 bool mm_wr_locked)            << 
1885 {                                                1488 {
1886         struct mmu_notifier_range range;      !! 1489         struct mm_struct *mm = vma->vm_mm;
1887         struct zap_details details = {        << 
1888                 .zap_flags = ZAP_FLAG_DROP_MA << 
1889                 /* Careful - we need to zap p << 
1890                 .even_cows = true,            << 
1891         };                                    << 
1892                                                  1490 
1893         mmu_notifier_range_init(&range, MMU_N !! 1491         mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
1894                                 start_addr, e !! 1492         for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
1895         mmu_notifier_invalidate_range_start(& !! 1493                 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
1896         do {                                  !! 1494         mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
1897                 unsigned long start = start_a !! 1495 }
1898                 unsigned long end = end_addr; !! 1496 
1899                 hugetlb_zap_begin(vma, &start !! 1497 /**
1900                 unmap_single_vma(tlb, vma, st !! 1498  * zap_page_range - remove user pages in a given range
1901                                  mm_wr_locked !! 1499  * @vma: vm_area_struct holding the applicable pages
1902                 hugetlb_zap_end(vma, &details !! 1500  * @start: starting address of pages to zap
1903                 vma = mas_find(mas, tree_end  !! 1501  * @size: number of bytes to zap
1904         } while (vma && likely(!xa_is_zero(vm !! 1502  *
1905         mmu_notifier_invalidate_range_end(&ra !! 1503  * Caller must protect the VMA list
                                                   >> 1504  */
                                                   >> 1505 void zap_page_range(struct vm_area_struct *vma, unsigned long start,
                                                   >> 1506                 unsigned long size)
                                                   >> 1507 {
                                                   >> 1508         struct mm_struct *mm = vma->vm_mm;
                                                   >> 1509         struct mmu_gather tlb;
                                                   >> 1510         unsigned long end = start + size;
                                                   >> 1511 
                                                   >> 1512         lru_add_drain();
                                                   >> 1513         tlb_gather_mmu(&tlb, mm, start, end);
                                                   >> 1514         update_hiwater_rss(mm);
                                                   >> 1515         mmu_notifier_invalidate_range_start(mm, start, end);
                                                   >> 1516         for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
                                                   >> 1517                 unmap_single_vma(&tlb, vma, start, end, NULL);
                                                   >> 1518         mmu_notifier_invalidate_range_end(mm, start, end);
                                                   >> 1519         tlb_finish_mmu(&tlb, start, end);
1906 }                                                1520 }
1907                                                  1521 
1908 /**                                              1522 /**
1909  * zap_page_range_single - remove user pages     1523  * zap_page_range_single - remove user pages in a given range
1910  * @vma: vm_area_struct holding the applicabl    1524  * @vma: vm_area_struct holding the applicable pages
1911  * @address: starting address of pages to zap    1525  * @address: starting address of pages to zap
1912  * @size: number of bytes to zap                 1526  * @size: number of bytes to zap
1913  * @details: details of shared cache invalida    1527  * @details: details of shared cache invalidation
1914  *                                               1528  *
1915  * The range must fit into one VMA.              1529  * The range must fit into one VMA.
1916  */                                              1530  */
1917 void zap_page_range_single(struct vm_area_str !! 1531 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1918                 unsigned long size, struct za    1532                 unsigned long size, struct zap_details *details)
1919 {                                                1533 {
1920         const unsigned long end = address + s !! 1534         struct mm_struct *mm = vma->vm_mm;
1921         struct mmu_notifier_range range;      << 
1922         struct mmu_gather tlb;                   1535         struct mmu_gather tlb;
                                                   >> 1536         unsigned long end = address + size;
1923                                                  1537 
1924         lru_add_drain();                         1538         lru_add_drain();
1925         mmu_notifier_range_init(&range, MMU_N !! 1539         tlb_gather_mmu(&tlb, mm, address, end);
1926                                 address, end) !! 1540         update_hiwater_rss(mm);
1927         hugetlb_zap_begin(vma, &range.start,  !! 1541         mmu_notifier_invalidate_range_start(mm, address, end);
1928         tlb_gather_mmu(&tlb, vma->vm_mm);     !! 1542         unmap_single_vma(&tlb, vma, address, end, details);
1929         update_hiwater_rss(vma->vm_mm);       !! 1543         mmu_notifier_invalidate_range_end(mm, address, end);
1930         mmu_notifier_invalidate_range_start(& !! 1544         tlb_finish_mmu(&tlb, address, end);
1931         /*                                    << 
1932          * unmap 'address-end' not 'range.sta << 
1933          * could have been expanded for huget << 
1934          */                                   << 
1935         unmap_single_vma(&tlb, vma, address,  << 
1936         mmu_notifier_invalidate_range_end(&ra << 
1937         tlb_finish_mmu(&tlb);                 << 
1938         hugetlb_zap_end(vma, details);        << 
1939 }                                                1545 }
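zap_page_range_single() is the building block behind per-VMA discards such as MADV_DONTNEED. A hedged usage sketch; discard_range() is a hypothetical helper, the range must lie within the one VMA, and the caller is assumed to hold the mmap lock:

#include <linux/mm.h>

static void discard_range(struct vm_area_struct *vma,
                          unsigned long start, unsigned long size)
{
        /* NULL details: zap all pages, no shared-cache invalidation. */
        zap_page_range_single(vma, start, size, NULL);
}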
1940                                                  1546 
1941 /**                                              1547 /**
1942  * zap_vma_ptes - remove ptes mapping the vma    1548  * zap_vma_ptes - remove ptes mapping the vma
1943  * @vma: vm_area_struct holding ptes to be za    1549  * @vma: vm_area_struct holding ptes to be zapped
1944  * @address: starting address of pages to zap    1550  * @address: starting address of pages to zap
1945  * @size: number of bytes to zap                 1551  * @size: number of bytes to zap
1946  *                                               1552  *
1947  * This function only unmaps ptes assigned to    1553  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1948  *                                               1554  *
1949  * The entire address range must be fully con    1555  * The entire address range must be fully contained within the vma.
1950  *                                               1556  *
                                                   >> 1557  * Returns 0 if successful.
1951  */                                              1558  */
1952 void zap_vma_ptes(struct vm_area_struct *vma, !! 1559 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1953                 unsigned long size)              1560                 unsigned long size)
1954 {                                                1561 {
1955         if (!range_in_vma(vma, address, addre !! 1562         if (address < vma->vm_start || address + size > vma->vm_end ||
1956                         !(vma->vm_flags & VM_    1563                         !(vma->vm_flags & VM_PFNMAP))
1957                 return;                       !! 1564                 return -1;
1958                                               << 
1959         zap_page_range_single(vma, address, s    1565         zap_page_range_single(vma, address, size, NULL);
                                                   >> 1566         return 0;
1960 }                                                1567 }
1961 EXPORT_SYMBOL_GPL(zap_vma_ptes);                 1568 EXPORT_SYMBOL_GPL(zap_vma_ptes);
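A driver that earlier established a VM_PFNMAP mapping (for example via remap_pfn_range()) can revoke it with zap_vma_ptes(), forcing later accesses back into its fault path. A hedged sketch; mydrv_revoke() is hypothetical, and note the newer version returns void where the older one returned 0 or -1:

#include <linux/mm.h>

static void mydrv_revoke(struct vm_area_struct *vma)
{
        /* Tear down every pte in the VMA; later faults must refill them. */
        zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}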
1962                                                  1569 
1963 static pmd_t *walk_to_pmd(struct mm_struct *m !! 1570 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
                                                   >> 1571                         spinlock_t **ptl)
1964 {                                                1572 {
1965         pgd_t *pgd;                              1573         pgd_t *pgd;
1966         p4d_t *p4d;                              1574         p4d_t *p4d;
1967         pud_t *pud;                              1575         pud_t *pud;
1968         pmd_t *pmd;                              1576         pmd_t *pmd;
1969                                                  1577 
1970         pgd = pgd_offset(mm, addr);              1578         pgd = pgd_offset(mm, addr);
1971         p4d = p4d_alloc(mm, pgd, addr);          1579         p4d = p4d_alloc(mm, pgd, addr);
1972         if (!p4d)                                1580         if (!p4d)
1973                 return NULL;                     1581                 return NULL;
1974         pud = pud_alloc(mm, p4d, addr);          1582         pud = pud_alloc(mm, p4d, addr);
1975         if (!pud)                                1583         if (!pud)
1976                 return NULL;                     1584                 return NULL;
1977         pmd = pmd_alloc(mm, pud, addr);          1585         pmd = pmd_alloc(mm, pud, addr);
1978         if (!pmd)                                1586         if (!pmd)
1979                 return NULL;                     1587                 return NULL;
1980                                                  1588 
1981         VM_BUG_ON(pmd_trans_huge(*pmd));         1589         VM_BUG_ON(pmd_trans_huge(*pmd));
1982         return pmd;                           << 
1983 }                                             << 
1984                                               << 
1985 pte_t *__get_locked_pte(struct mm_struct *mm, << 
1986                         spinlock_t **ptl)     << 
1987 {                                             << 
1988         pmd_t *pmd = walk_to_pmd(mm, addr);   << 
1989                                               << 
1990         if (!pmd)                             << 
1991                 return NULL;                  << 
1992         return pte_alloc_map_lock(mm, pmd, ad    1590         return pte_alloc_map_lock(mm, pmd, addr, ptl);
1993 }                                                1591 }
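__get_locked_pte() backs the get_locked_pte() wrapper in <linux/mm.h>: allocate any missing intermediate tables, then return the pte mapped and locked. A hedged sketch of the lookup-then-unlock discipline; addr_is_populated() is hypothetical and ptep_get() assumes a recent kernel:

#include <linux/mm.h>

static bool addr_is_populated(struct mm_struct *mm, unsigned long addr)
{
        spinlock_t *ptl;
        pte_t *pte;
        bool ret;

        pte = get_locked_pte(mm, addr, &ptl);   /* may allocate tables */
        if (!pte)
                return false;                   /* allocation failed */
        ret = !pte_none(ptep_get(pte));
        pte_unmap_unlock(pte, ptl);             /* always pair with unlock */
        return ret;
}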
1994                                                  1592 
1995 static bool vm_mixed_zeropage_allowed(struct  !! 1593 /*
1996 {                                             !! 1594  * This is the old fallback for page remapping.
1997         VM_WARN_ON_ONCE(vma->vm_flags & VM_PF !! 1595  *
1998         /*                                    !! 1596  * For historical reasons, it only allows reserved pages. Only
1999          * Whoever wants to forbid the zeropa !! 1597  * old drivers should use this, and they needed to mark their
2000          * might already have been mapped has !! 1598  * pages reserved for the old functions anyway.
2001          * bail out on any zeropages. Zeropag !! 1599  */
2002          * be unshared using FAULT_FLAG_UNSHA << 
2003          */                                   << 
2004         if (mm_forbids_zeropage(vma->vm_mm))  << 
2005                 return false;                 << 
2006         /* zeropages in COW mappings are comm << 
2007         if (is_cow_mapping(vma->vm_flags))    << 
2008                 return true;                  << 
2009         /* Mappings that do not allow for wri << 
2010         if (!(vma->vm_flags & (VM_WRITE | VM_ << 
2011                 return true;                  << 
2012         /*                                    << 
2013          * Why not allow any VMA that has vm_ << 
2014          * find the shared zeropage and longt << 
2015          * be problematic as soon as the zero << 
2016          * page due to vma->vm_ops->pfn_mkwri << 
2017          * now differ to what GUP looked up.  << 
2018          * FOLL_LONGTERM and VM_IO is incompa << 
2019          * check_vma_flags).                  << 
2020          */                                   << 
2021         return vma->vm_ops && vma->vm_ops->pf << 
2022                (vma_is_fsdax(vma) || vma->vm_ << 
2023 }                                             << 
2024                                               << 
2025 static int validate_page_before_insert(struct << 
2026                                        struct << 
2027 {                                             << 
2028         struct folio *folio = page_folio(page << 
2029                                               << 
2030         if (!folio_ref_count(folio))          << 
2031                 return -EINVAL;               << 
2032         if (unlikely(is_zero_folio(folio))) { << 
2033                 if (!vm_mixed_zeropage_allowe << 
2034                         return -EINVAL;       << 
2035                 return 0;                     << 
2036         }                                     << 
2037         if (folio_test_anon(folio) || folio_t << 
2038             page_has_type(page))              << 
2039                 return -EINVAL;               << 
2040         flush_dcache_folio(folio);            << 
2041         return 0;                             << 
2042 }                                             << 
2043                                               << 
2044 static int insert_page_into_pte_locked(struct << 
2045                         unsigned long addr, s << 
2046 {                                             << 
2047         struct folio *folio = page_folio(page << 
2048         pte_t pteval;                         << 
2049                                               << 
2050         if (!pte_none(ptep_get(pte)))         << 
2051                 return -EBUSY;                << 
2052         /* Ok, finally just insert the thing. << 
2053         pteval = mk_pte(page, prot);          << 
2054         if (unlikely(is_zero_folio(folio))) { << 
2055                 pteval = pte_mkspecial(pteval << 
2056         } else {                              << 
2057                 folio_get(folio);             << 
2058                 inc_mm_counter(vma->vm_mm, mm << 
2059                 folio_add_file_rmap_pte(folio << 
2060         }                                     << 
2061         set_pte_at(vma->vm_mm, addr, pte, pte << 
2062         return 0;                             << 
2063 }                                             << 
2064                                               << 
2065 static int insert_page(struct vm_area_struct     1600 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
2066                         struct page *page, pg    1601                         struct page *page, pgprot_t prot)
2067 {                                                1602 {
                                                   >> 1603         struct mm_struct *mm = vma->vm_mm;
2068         int retval;                              1604         int retval;
2069         pte_t *pte;                              1605         pte_t *pte;
2070         spinlock_t *ptl;                         1606         spinlock_t *ptl;
2071                                                  1607 
2072         retval = validate_page_before_insert( !! 1608         retval = -EINVAL;
2073         if (retval)                           !! 1609         if (PageAnon(page))
2074                 goto out;                        1610                 goto out;
2075         retval = -ENOMEM;                        1611         retval = -ENOMEM;
2076         pte = get_locked_pte(vma->vm_mm, addr !! 1612         flush_dcache_page(page);
                                                   >> 1613         pte = get_locked_pte(mm, addr, &ptl);
2077         if (!pte)                                1614         if (!pte)
2078                 goto out;                        1615                 goto out;
2079         retval = insert_page_into_pte_locked( !! 1616         retval = -EBUSY;
2080         pte_unmap_unlock(pte, ptl);           !! 1617         if (!pte_none(*pte))
2081 out:                                          !! 1618                 goto out_unlock;
2082         return retval;                        << 
2083 }                                             << 
2084                                               << 
2085 static int insert_page_in_batch_locked(struct << 
2086                         unsigned long addr, s << 
2087 {                                             << 
2088         int err;                              << 
2089                                               << 
2090         err = validate_page_before_insert(vma << 
2091         if (err)                              << 
2092                 return err;                   << 
2093         return insert_page_into_pte_locked(vm << 
2094 }                                             << 
2095                                               << 
2096 /* insert_pages() amortizes the cost of spinl << 
2097  * when inserting pages in a loop.            << 
2098  */                                           << 
2099 static int insert_pages(struct vm_area_struct << 
2100                         struct page **pages,  << 
2101 {                                             << 
2102         pmd_t *pmd = NULL;                    << 
2103         pte_t *start_pte, *pte;               << 
2104         spinlock_t *pte_lock;                 << 
2105         struct mm_struct *const mm = vma->vm_ << 
2106         unsigned long curr_page_idx = 0;      << 
2107         unsigned long remaining_pages_total = << 
2108         unsigned long pages_to_write_in_pmd;  << 
2109         int ret;                              << 
2110 more:                                         << 
2111         ret = -EFAULT;                        << 
2112         pmd = walk_to_pmd(mm, addr);          << 
2113         if (!pmd)                             << 
2114                 goto out;                     << 
2115                                               << 
2116         pages_to_write_in_pmd = min_t(unsigne << 
2117                 remaining_pages_total, PTRS_P << 
2118                                                  1619 
2119         /* Allocate the PTE if necessary; tak !! 1620         /* Ok, finally just insert the thing.. */
2120         ret = -ENOMEM;                        !! 1621         get_page(page);
2121         if (pte_alloc(mm, pmd))               !! 1622         inc_mm_counter_fast(mm, mm_counter_file(page));
2122                 goto out;                     !! 1623         page_add_file_rmap(page, false);
                                                   >> 1624         set_pte_at(mm, addr, pte, mk_pte(page, prot));
2123                                                  1625 
2124         while (pages_to_write_in_pmd) {       !! 1626         retval = 0;
2125                 int pte_idx = 0;              !! 1627         pte_unmap_unlock(pte, ptl);
2126                 const int batch_size = min_t( !! 1628         return retval;
2127                                               !! 1629 out_unlock:
2128                 start_pte = pte_offset_map_lo !! 1630         pte_unmap_unlock(pte, ptl);
2129                 if (!start_pte) {             << 
2130                         ret = -EFAULT;        << 
2131                         goto out;             << 
2132                 }                             << 
2133                 for (pte = start_pte; pte_idx << 
2134                         int err = insert_page << 
2135                                 addr, pages[c << 
2136                         if (unlikely(err)) {  << 
2137                                 pte_unmap_unl << 
2138                                 ret = err;    << 
2139                                 remaining_pag << 
2140                                 goto out;     << 
2141                         }                     << 
2142                         addr += PAGE_SIZE;    << 
2143                         ++curr_page_idx;      << 
2144                 }                             << 
2145                 pte_unmap_unlock(start_pte, p << 
2146                 pages_to_write_in_pmd -= batc << 
2147                 remaining_pages_total -= batc << 
2148         }                                     << 
2149         if (remaining_pages_total)            << 
2150                 goto more;                    << 
2151         ret = 0;                              << 
2152 out:                                             1631 out:
2153         *num = remaining_pages_total;         !! 1632         return retval;
2154         return ret;                           << 
2155 }                                             << 
2156                                               << 
2157 /**                                           << 
2158  * vm_insert_pages - insert multiple pages in << 
2159  * @vma: user vma to map to                   << 
2160  * @addr: target start user address of these  << 
2161  * @pages: source kernel pages                << 
2162  * @num: in: number of pages to map. out: num << 
2163  * mapped. (0 means all pages were successful << 
2164  *                                            << 
2165  * Preferred over vm_insert_page() when inser << 
2166  *                                            << 
2167  * In case of error, we may have mapped a sub << 
2168  * pages. It is the caller's responsibility t << 
2169  *                                            << 
2170  * The same restrictions apply as in vm_inser << 
2171  */                                           << 
2172 int vm_insert_pages(struct vm_area_struct *vm << 
2173                         struct page **pages,  << 
2174 {                                             << 
2175         const unsigned long end_addr = addr + << 
2176                                               << 
2177         if (addr < vma->vm_start || end_addr  << 
2178                 return -EFAULT;               << 
2179         if (!(vma->vm_flags & VM_MIXEDMAP)) { << 
2180                 BUG_ON(mmap_read_trylock(vma- << 
2181                 BUG_ON(vma->vm_flags & VM_PFN << 
2182                 vm_flags_set(vma, VM_MIXEDMAP << 
2183         }                                     << 
2184         /* Defer page refcount checking till  << 
2185         return insert_pages(vma, addr, pages, << 
2186 }                                                1633 }
2187 EXPORT_SYMBOL(vm_insert_pages);               << 
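A hedged sketch of the intended caller, an f_op->mmap handler mapping a preallocated page array in one batch; struct mydrv_buf and its fields are hypothetical driver state:

#include <linux/fs.h>
#include <linux/mm.h>

struct mydrv_buf {
        struct page **pages;
        unsigned long nr_pages;
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct mydrv_buf *buf = file->private_data;
        unsigned long num = vma_pages(vma);

        if (num > buf->nr_pages)
                return -ENXIO;
        /* On error 'num' reports how many pages remain unmapped. */
        return vm_insert_pages(vma, vma->vm_start, buf->pages, &num);
}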
2188                                                  1634 
2189 /**                                              1635 /**
2190  * vm_insert_page - insert single page into u    1636  * vm_insert_page - insert single page into user vma
2191  * @vma: user vma to map to                      1637  * @vma: user vma to map to
2192  * @addr: target user address of this page       1638  * @addr: target user address of this page
2193  * @page: source kernel page                     1639  * @page: source kernel page
2194  *                                               1640  *
2195  * This allows drivers to insert individual p    1641  * This allows drivers to insert individual pages they've allocated
2196  * into a user vma. The zeropage is supported !! 1642  * into a user vma.
2197  * see vm_mixed_zeropage_allowed().           << 
2198  *                                               1643  *
2199  * The page has to be a nice clean _individua    1644  * The page has to be a nice clean _individual_ kernel allocation.
2200  * If you allocate a compound page, you need     1645  * If you allocate a compound page, you need to have marked it as
2201  * such (__GFP_COMP), or manually just split     1646  * such (__GFP_COMP), or manually just split the page up yourself
2202  * (see split_page()).                           1647  * (see split_page()).
2203  *                                               1648  *
2204  * NOTE! Traditionally this was done with "re    1649  * NOTE! Traditionally this was done with "remap_pfn_range()" which
2205  * took an arbitrary page protection paramete    1650  * took an arbitrary page protection parameter. This doesn't allow
2206  * that. Your vma protection will have to be     1651  * that. Your vma protection will have to be set up correctly, which
2207  * means that if you want a shared writable m    1652  * means that if you want a shared writable mapping, you'd better
2208  * ask for a shared writable mapping!            1653  * ask for a shared writable mapping!
2209  *                                               1654  *
2210  * The page does not need to be reserved.        1655  * The page does not need to be reserved.
2211  *                                               1656  *
2212  * Usually this function is called from f_op-    1657  * Usually this function is called from f_op->mmap() handler
2213  * under mm->mmap_lock write-lock, so it can  !! 1658  * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
2214  * Caller must set VM_MIXEDMAP on vma if it w    1659  * Caller must set VM_MIXEDMAP on vma if it wants to call this
2215  * function from other places, for example fr    1660  * function from other places, for example from page-fault handler.
2216  *                                            << 
2217  * Return: %0 on success, negative error code << 
2218  */                                              1661  */
2219 int vm_insert_page(struct vm_area_struct *vma    1662 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2220                         struct page *page)       1663                         struct page *page)
2221 {                                                1664 {
2222         if (addr < vma->vm_start || addr >= v    1665         if (addr < vma->vm_start || addr >= vma->vm_end)
2223                 return -EFAULT;                  1666                 return -EFAULT;
                                                   >> 1667         if (!page_count(page))
                                                   >> 1668                 return -EINVAL;
2224         if (!(vma->vm_flags & VM_MIXEDMAP)) {    1669         if (!(vma->vm_flags & VM_MIXEDMAP)) {
2225                 BUG_ON(mmap_read_trylock(vma- !! 1670                 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
2226                 BUG_ON(vma->vm_flags & VM_PFN    1671                 BUG_ON(vma->vm_flags & VM_PFNMAP);
2227                 vm_flags_set(vma, VM_MIXEDMAP !! 1672                 vma->vm_flags |= VM_MIXEDMAP;
2228         }                                        1673         }
2229         return insert_page(vma, addr, page, v    1674         return insert_page(vma, addr, page, vma->vm_page_prot);
2230 }                                                1675 }
2231 EXPORT_SYMBOL(vm_insert_page);                   1676 EXPORT_SYMBOL(vm_insert_page);
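For a single page the pattern is the same without the batching; a hedged sketch mapping one shared kernel page from ->mmap, where the page pointer kept in file->private_data is hypothetical driver state:

#include <linux/fs.h>
#include <linux/mm.h>

static int mydrv_mmap_one(struct file *file, struct vm_area_struct *vma)
{
        struct page *page = file->private_data;

        if (vma_pages(vma) != 1)
                return -ENXIO;
        /* Called under the mmap write lock, so VM_MIXEDMAP may be set. */
        return vm_insert_page(vma, vma->vm_start, page);
}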
2232                                                  1677 
2233 /*                                            !! 1678 static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2234  * __vm_map_pages - maps range of kernel page !! 1679                         pfn_t pfn, pgprot_t prot)
2235  * @vma: user vma to map to                   << 
2236  * @pages: pointer to array of source kernel  << 
2237  * @num: number of pages in page array        << 
2238  * @offset: user's requested vm_pgoff         << 
2239  *                                            << 
2240  * This allows drivers to map range of kernel << 
2241  * The zeropage is supported in some VMAs, se << 
2242  * vm_mixed_zeropage_allowed().               << 
2243  *                                            << 
2244  * Return: 0 on success and error code otherw << 
2245  */                                           << 
2246 static int __vm_map_pages(struct vm_area_stru << 
2247                                 unsigned long << 
2248 {                                             << 
2249         unsigned long count = vma_pages(vma); << 
2250         unsigned long uaddr = vma->vm_start;  << 
2251         int ret, i;                           << 
2252                                               << 
2253         /* Fail if the user requested offset  << 
2254         if (offset >= num)                    << 
2255                 return -ENXIO;                << 
2256                                               << 
2257         /* Fail if the user requested size ex << 
2258         if (count > num - offset)             << 
2259                 return -ENXIO;                << 
2260                                               << 
2261         for (i = 0; i < count; i++) {         << 
2262                 ret = vm_insert_page(vma, uad << 
2263                 if (ret < 0)                  << 
2264                         return ret;           << 
2265                 uaddr += PAGE_SIZE;           << 
2266         }                                     << 
2267                                               << 
2268         return 0;                             << 
2269 }                                             << 
2270                                               << 
2271 /**                                           << 
2272  * vm_map_pages - maps range of kernel pages  << 
2273  * @vma: user vma to map to                   << 
2274  * @pages: pointer to array of source kernel  << 
2275  * @num: number of pages in page array        << 
2276  *                                            << 
2277  * Maps an object consisting of @num pages, c << 
2278  * requested vm_pgoff                         << 
2279  *                                            << 
2280  * If we fail to insert any page into the vma << 
2281  * immediately leaving any previously inserte << 
2282  * from the mmap handler may immediately retu << 
2283  * will destroy the vma, removing any success << 
2284  * callers should make their own arrangements << 
2285  *                                            << 
2286  * Context: Process context. Called by mmap h << 
2287  * Return: 0 on success and error code otherw << 
2288  */                                           << 
2289 int vm_map_pages(struct vm_area_struct *vma,  << 
2290                                 unsigned long << 
2291 {                                             << 
2292         return __vm_map_pages(vma, pages, num << 
2293 }                                             << 
2294 EXPORT_SYMBOL(vm_map_pages);                  << 
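vm_map_pages() differs from open-coding a vm_insert_page() loop in that it validates the request against @num and honors the user's vm_pgoff. A hedged fragment from a hypothetical ->mmap handler, reusing the mydrv_buf sketch above:

        /* Map the whole buffer, offset by the user's vm_pgoff. */
        return vm_map_pages(vma, buf->pages, buf->nr_pages);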
2295                                               << 
2296 /**                                           << 
2297  * vm_map_pages_zero - map range of kernel pa << 
2298  * @vma: user vma to map to                   << 
2299  * @pages: pointer to array of source kernel  << 
2300  * @num: number of pages in page array        << 
2301  *                                            << 
2302  * Similar to vm_map_pages(), except that it  << 
2303  * to 0. This function is intended for the dr << 
2304  * vm_pgoff.                                  << 
2305  *                                            << 
2306  * Context: Process context. Called by mmap h << 
2307  * Return: 0 on success and error code otherw << 
2308  */                                           << 
2309 int vm_map_pages_zero(struct vm_area_struct * << 
2310                                 unsigned long << 
2311 {                                             << 
2312         return __vm_map_pages(vma, pages, num << 
2313 }                                             << 
2314 EXPORT_SYMBOL(vm_map_pages_zero);             << 
2315                                               << 
2316 static vm_fault_t insert_pfn(struct vm_area_s << 
2317                         pfn_t pfn, pgprot_t p << 
2318 {                                                1680 {
2319         struct mm_struct *mm = vma->vm_mm;       1681         struct mm_struct *mm = vma->vm_mm;
                                                   >> 1682         int retval;
2320         pte_t *pte, entry;                       1683         pte_t *pte, entry;
2321         spinlock_t *ptl;                         1684         spinlock_t *ptl;
2322                                                  1685 
                                                   >> 1686         retval = -ENOMEM;
2323         pte = get_locked_pte(mm, addr, &ptl);    1687         pte = get_locked_pte(mm, addr, &ptl);
2324         if (!pte)                                1688         if (!pte)
2325                 return VM_FAULT_OOM;          !! 1689                 goto out;
2326         entry = ptep_get(pte);                !! 1690         retval = -EBUSY;
2327         if (!pte_none(entry)) {               !! 1691         if (!pte_none(*pte))
2328                 if (mkwrite) {                << 
2329                         /*                    << 
2330                          * For read faults on << 
2331                          * in may not match t << 
2332                          * mapped PFN is a wr << 
2333                          * case we are creati << 
2334                          * mapping and we exp << 
2335                          * don't match, we ar << 
2336                          * allocation and map << 
2337                          * update.            << 
2338                          */                   << 
2339                         if (pte_pfn(entry) != << 
2340                                 WARN_ON_ONCE( << 
2341                                 goto out_unlo << 
2342                         }                     << 
2343                         entry = pte_mkyoung(e << 
2344                         entry = maybe_mkwrite << 
2345                         if (ptep_set_access_f << 
2346                                 update_mmu_ca << 
2347                 }                             << 
2348                 goto out_unlock;                 1692                 goto out_unlock;
2349         }                                     << 
2350                                                  1693 
2351         /* Ok, finally just insert the thing.    1694         /* Ok, finally just insert the thing.. */
2352         if (pfn_t_devmap(pfn))                   1695         if (pfn_t_devmap(pfn))
2353                 entry = pte_mkdevmap(pfn_t_pt    1696                 entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2354         else                                     1697         else
2355                 entry = pte_mkspecial(pfn_t_p    1698                 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2356                                               << 
2357         if (mkwrite) {                        << 
2358                 entry = pte_mkyoung(entry);   << 
2359                 entry = maybe_mkwrite(pte_mkd << 
2360         }                                     << 
2361                                               << 
2362         set_pte_at(mm, addr, pte, entry);        1699         set_pte_at(mm, addr, pte, entry);
2363         update_mmu_cache(vma, addr, pte); /*     1700         update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2364                                                  1701 
                                                   >> 1702         retval = 0;
2365 out_unlock:                                      1703 out_unlock:
2366         pte_unmap_unlock(pte, ptl);              1704         pte_unmap_unlock(pte, ptl);
2367         return VM_FAULT_NOPAGE;               !! 1705 out:
                                                   >> 1706         return retval;
                                                   >> 1707 }
                                                   >> 1708 
                                                   >> 1709 /**
                                                   >> 1710  * vm_insert_pfn - insert single pfn into user vma
                                                   >> 1711  * @vma: user vma to map to
                                                   >> 1712  * @addr: target user address of this page
                                                   >> 1713  * @pfn: source kernel pfn
                                                   >> 1714  *
                                                   >> 1715  * Similar to vm_insert_page, this allows drivers to insert individual pages
                                                   >> 1716  * they've allocated into a user vma. Same comments apply.
                                                   >> 1717  *
                                                   >> 1718  * This function should only be called from a vm_ops->fault handler, and
                                                   >> 1719  * in that case the handler should return NULL.
                                                   >> 1720  *
                                                   >> 1721  * vma cannot be a COW mapping.
                                                   >> 1722  *
                                                   >> 1723  * As this is called only for pages that do not currently exist, we
                                                   >> 1724  * do not need to flush old virtual caches or the TLB.
                                                   >> 1725  */
                                                   >> 1726 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                                                   >> 1727                         unsigned long pfn)
                                                   >> 1728 {
                                                   >> 1729         return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2368 }                                                1730 }
                                                   >> 1731 EXPORT_SYMBOL(vm_insert_pfn);
2369                                                  1732 
2370 /**                                              1733 /**
2371  * vmf_insert_pfn_prot - insert single pfn in !! 1734  * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2372  * @vma: user vma to map to                      1735  * @vma: user vma to map to
2373  * @addr: target user address of this page       1736  * @addr: target user address of this page
2374  * @pfn: source kernel pfn                       1737  * @pfn: source kernel pfn
2375  * @pgprot: pgprot flags for the inserted pag    1738  * @pgprot: pgprot flags for the inserted page
2376  *                                               1739  *
2377  * This is exactly like vmf_insert_pfn(), exc !! 1740  * This is exactly like vm_insert_pfn, except that it allows drivers
2378  * to override pgprot on a per-page basis.       1741  * to override pgprot on a per-page basis.
2379  *                                               1742  *
2380  * This only makes sense for IO mappings, and    1743  * This only makes sense for IO mappings, and it makes no sense for
2381  * COW mappings.  In general, using multiple  !! 1744  * cow mappings.  In general, using multiple vmas is preferable;
2382  * vmf_insert_pfn_prot should only be used if !! 1745  * vm_insert_pfn_prot should only be used if using multiple VMAs is
2383  * impractical.                                  1746  * impractical.
2384  *                                            << 
2385  * pgprot typically only differs from @vma->v << 
2386  * caching- and encryption bits different tha << 
2387  * because the caching- or encryption mode ma << 
2388  *                                            << 
2389  * This is ok as long as @vma->vm_page_prot i << 
2390  * to set caching and encryption bits for tho << 
2391  * This is ensured by core vm only modifying  << 
2392  * functions that don't touch caching- or enc << 
2393  * if needed. (See for example mprotect()).   << 
2394  *                                            << 
2395  * Also when new page-table entries are creat << 
2396  * fault() callback, and never using the valu << 
2397  * except for page-table entries that point t << 
2398  * of COW.                                    << 
2399  *                                            << 
2400  * Context: Process context.  May allocate us << 
2401  * Return: vm_fault_t value.                  << 
2402  */                                              1747  */
2403 vm_fault_t vmf_insert_pfn_prot(struct vm_area !! 1748 int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2404                         unsigned long pfn, pg    1749                         unsigned long pfn, pgprot_t pgprot)
2405 {                                                1750 {
                                                   >> 1751         int ret;
2406         /*                                       1752         /*
2407          * Technically, architectures with pt    1753          * Technically, architectures with pte_special can avoid all these
2408          * restrictions (same for remap_pfn_r    1754          * restrictions (same for remap_pfn_range).  However we would like
2409          * consistency in testing and feature    1755          * consistency in testing and feature parity among all, so we should
2410          * try to keep these invariants in pl    1756          * try to keep these invariants in place for everybody.
2411          */                                      1757          */
2412         BUG_ON(!(vma->vm_flags & (VM_PFNMAP|V    1758         BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2413         BUG_ON((vma->vm_flags & (VM_PFNMAP|VM    1759         BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2414                                                  1760                                                 (VM_PFNMAP|VM_MIXEDMAP));
2415         BUG_ON((vma->vm_flags & VM_PFNMAP) &&    1761         BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2416         BUG_ON((vma->vm_flags & VM_MIXEDMAP)     1762         BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2417                                                  1763 
2418         if (addr < vma->vm_start || addr >= v    1764         if (addr < vma->vm_start || addr >= vma->vm_end)
2419                 return VM_FAULT_SIGBUS;       !! 1765                 return -EFAULT;
2420                                               << 
2421         if (!pfn_modify_allowed(pfn, pgprot)) << 
2422                 return VM_FAULT_SIGBUS;       << 
2423                                                  1766 
2424         track_pfn_insert(vma, &pgprot, __pfn_    1767         track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2425                                                  1768 
2426         return insert_pfn(vma, addr, __pfn_to !! 1769         ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot);
2427                         false);               << 
2428 }                                             << 
2429 EXPORT_SYMBOL(vmf_insert_pfn_prot);           << 
2430                                                  1770 
2431 /**                                           !! 1771         return ret;
2432  * vmf_insert_pfn - insert single pfn into user vma                  <<
2433  * @vma: user vma to map to                   <<
2434  * @addr: target user address of this page    <<
2435  * @pfn: source kernel pfn                    <<
2436  *                                            <<
2437  * Similar to vm_insert_page, this allows drivers to insert individual pages         <<
2438  * they've allocated into a user vma. Same comments apply.           <<
2439  *                                            <<
2440  * This function should only be called from a vm_ops->fault handler, and             <<
2441  * in that case the handler should return the result of this function.               <<
2442  *                                            <<
2443  * vma cannot be a COW mapping.               <<
2444  *                                            <<
2445  * As this is called only for pages that do not currently exist, we  <<
2446  * do not need to flush old virtual caches or the TLB.               <<
2447  *                                            <<
2448  * Context: Process context.  May allocate using %GFP_KERNEL.        <<
2449  * Return: vm_fault_t value.                  <<
2450  */                                           <<
2451 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,            <<
2452                         unsigned long pfn)    <<
2453 {                                             <<
2454         return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);               <<
2455 }                                             << 
2456 EXPORT_SYMBOL(vmf_insert_pfn);                << 
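
A minimal, hypothetical sketch of the usage pattern the kerneldoc above describes: a driver fault handler built on vmf_insert_pfn(). The mydev structure and its bar_phys field are invented for illustration and are not part of memory.c:

    static vm_fault_t mydev_vm_fault(struct vm_fault *vmf)
    {
            struct mydev *dev = vmf->vma->vm_private_data;  /* hypothetical */
            unsigned long pfn = (dev->bar_phys >> PAGE_SHIFT) + vmf->pgoff;

            /* Return the vm_fault_t directly, as the comment above requires. */
            return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
    }

The vma is assumed to have been set up as VM_PFNMAP (and not COW) at mmap time.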
2457                                               << 
2458 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn, bool mkwrite)         <<
2459 {                                             <<
2460         if (unlikely(is_zero_pfn(pfn_t_to_pfn(pfn))) &&              <<
2461             (mkwrite || !vm_mixed_zeropage_allowed(vma)))            <<
2462                 return false;                 <<
2463         /* these checks mirror the abort conditions in vm_normal_page */             <<
2464         if (vma->vm_flags & VM_MIXEDMAP)      << 
2465                 return true;                  << 
2466         if (pfn_t_devmap(pfn))                << 
2467                 return true;                  << 
2468         if (pfn_t_special(pfn))               << 
2469                 return true;                  << 
2470         if (is_zero_pfn(pfn_t_to_pfn(pfn)))   << 
2471                 return true;                  << 
2472         return false;                         << 
2473 }                                                1772 }
                                                   >> 1773 EXPORT_SYMBOL(vm_insert_pfn_prot);
2474                                                  1774 
2475 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,      !! 1775 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2476                 unsigned long addr, pfn_t pfn, bool mkwrite)         !! 1776                         pfn_t pfn)
2477 {                                                1777 {
2478         pgprot_t pgprot = vma->vm_page_prot;     1778         pgprot_t pgprot = vma->vm_page_prot;
2479         int err;                              << 
2480                                                  1779 
2481         if (!vm_mixed_ok(vma, pfn, mkwrite))  !! 1780         BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
2482                 return VM_FAULT_SIGBUS;       << 
2483                                                  1781 
2484         if (addr < vma->vm_start || addr >= v    1782         if (addr < vma->vm_start || addr >= vma->vm_end)
2485                 return VM_FAULT_SIGBUS;       !! 1783                 return -EFAULT;
2486                                                  1784 
2487         track_pfn_insert(vma, &pgprot, pfn);     1785         track_pfn_insert(vma, &pgprot, pfn);
2488                                                  1786 
2489         if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))          <<
2490                 return VM_FAULT_SIGBUS;       << 
2491                                               << 
2492         /*                                       1787         /*
2493          * If we don't have pte special, then    1788          * If we don't have pte special, then we have to use the pfn_valid()
2494          * based VM_MIXEDMAP scheme (see vm_n    1789          * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2495          * refcount the page if pfn_valid is     1790          * refcount the page if pfn_valid is true (hence insert_page rather
2496          * than insert_pfn).  If a zero_pfn w    1791          * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
2497          * without pte special, it would ther    1792          * without pte special, it would there be refcounted as a normal page.
2498          */                                      1793          */
2499         if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&              !! 1794         if (!HAVE_PTE_SPECIAL && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2500             !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {                <<
2501                 struct page *page;               1795                 struct page *page;
2502                                                  1796 
2503                 /*                               1797                 /*
2504                  * At this point we are commi    1798                  * At this point we are committed to insert_page()
2505                  * regardless of whether the     1799                  * regardless of whether the caller specified flags that
2506                  * result in pfn_t_has_page()    1800                  * result in pfn_t_has_page() == false.
2507                  */                              1801                  */
2508                 page = pfn_to_page(pfn_t_to_p    1802                 page = pfn_to_page(pfn_t_to_pfn(pfn));
2509                 err = insert_page(vma, addr, page, pgprot);          !! 1803                 return insert_page(vma, addr, page, pgprot);
2510         } else {                              <<
2511                 return insert_pfn(vma, addr, pfn, pgprot, mkwrite);  <<
2512         }                                        1804         }
2513                                               !! 1805         return insert_pfn(vma, addr, pfn, pgprot);
2514         if (err == -ENOMEM)                   << 
2515                 return VM_FAULT_OOM;          << 
2516         if (err < 0 && err != -EBUSY)         << 
2517                 return VM_FAULT_SIGBUS;       << 
2518                                               << 
2519         return VM_FAULT_NOPAGE;               << 
2520 }                                             << 
2521                                               << 
2522 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,          <<
2523                 pfn_t pfn)                    <<
2524 {                                             <<
2525         return __vm_insert_mixed(vma, addr, pfn, false);             <<
2526 }                                             <<
2527 EXPORT_SYMBOL(vmf_insert_mixed);              <<
2528                                               <<
2529 /*                                            <<
2530  *  If the insertion of PTE failed because someone else already added a              <<
2531  *  different entry in the mean time, we treat that as success as we assume          <<
2532  *  the same entry was actually inserted.     <<
2533  */                                           <<
2534 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,      <<
2535                 unsigned long addr, pfn_t pfn)                       <<
2536 {                                             <<
2537         return __vm_insert_mixed(vma, addr, pfn, true);              <<
2538 }                                                1806 }
                                                   >> 1807 EXPORT_SYMBOL(vm_insert_mixed);
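
For the mixed-map insertion paths above, a hedged sketch of a fault handler for a VM_MIXEDMAP vma; mydev and mydev_pgoff_to_pfn() are hypothetical names, and the PFN_DEV flag choice is purely illustrative:

    static vm_fault_t mydev_mixed_fault(struct vm_fault *vmf)
    {
            struct mydev *dev = vmf->vma->vm_private_data;            /* hypothetical */
            unsigned long pfn = mydev_pgoff_to_pfn(dev, vmf->pgoff);  /* hypothetical */

            /* Refcounts a backing struct page or inserts a raw pfn, as appropriate. */
            return vmf_insert_mixed(vmf->vma, vmf->address,
                                    __pfn_to_pfn_t(pfn, PFN_DEV));
    }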
2539                                                  1808 
2540 /*                                               1809 /*
2541  * maps a range of physical memory into the r    1810  * maps a range of physical memory into the requested pages. the old
2542  * mappings are removed. any references to no    1811  * mappings are removed. any references to nonexistent pages results
2543  * in null mappings (currently treated as "co    1812  * in null mappings (currently treated as "copy-on-access")
2544  */                                              1813  */
2545 static int remap_pte_range(struct mm_struct *    1814 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2546                         unsigned long addr, u    1815                         unsigned long addr, unsigned long end,
2547                         unsigned long pfn, pg    1816                         unsigned long pfn, pgprot_t prot)
2548 {                                                1817 {
2549         pte_t *pte, *mapped_pte;              !! 1818         pte_t *pte;
2550         spinlock_t *ptl;                         1819         spinlock_t *ptl;
2551         int err = 0;                          << 
2552                                                  1820 
2553         mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);  !! 1821         pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2554         if (!pte)                                1822         if (!pte)
2555                 return -ENOMEM;                  1823                 return -ENOMEM;
2556         arch_enter_lazy_mmu_mode();              1824         arch_enter_lazy_mmu_mode();
2557         do {                                     1825         do {
2558                 BUG_ON(!pte_none(ptep_get(pte)));                    !! 1826                 BUG_ON(!pte_none(*pte));
2559                 if (!pfn_modify_allowed(pfn, prot)) {                <<
2560                         err = -EACCES;        << 
2561                         break;                << 
2562                 }                             << 
2563                 set_pte_at(mm, addr, pte, pte    1827                 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2564                 pfn++;                           1828                 pfn++;
2565         } while (pte++, addr += PAGE_SIZE, ad    1829         } while (pte++, addr += PAGE_SIZE, addr != end);
2566         arch_leave_lazy_mmu_mode();              1830         arch_leave_lazy_mmu_mode();
2567         pte_unmap_unlock(mapped_pte, ptl);    !! 1831         pte_unmap_unlock(pte - 1, ptl);
2568         return err;                           !! 1832         return 0;
2569 }                                                1833 }
2570                                                  1834 
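A note before the pmd/pud/p4d walkers that follow: each level rebases the pfn once with "pfn -= addr >> PAGE_SHIFT" and then hands "pfn + (addr >> PAGE_SHIFT)" to the next level down. The subtraction turns pfn into a constant bias, so the same expression yields the right frame for every address in the range. For example, with 4K pages, mapping pfn 0x100 at addr 0x2000 gives a bias of 0x100 - 2 = 0xfe, and the pte for addr 0x3000 then receives pfn 0xfe + 3 = 0x101, one page past the starting frame, as expected.
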
2571 static inline int remap_pmd_range(struct mm_s    1835 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2572                         unsigned long addr, u    1836                         unsigned long addr, unsigned long end,
2573                         unsigned long pfn, pg    1837                         unsigned long pfn, pgprot_t prot)
2574 {                                                1838 {
2575         pmd_t *pmd;                              1839         pmd_t *pmd;
2576         unsigned long next;                      1840         unsigned long next;
2577         int err;                              << 
2578                                                  1841 
2579         pfn -= addr >> PAGE_SHIFT;               1842         pfn -= addr >> PAGE_SHIFT;
2580         pmd = pmd_alloc(mm, pud, addr);          1843         pmd = pmd_alloc(mm, pud, addr);
2581         if (!pmd)                                1844         if (!pmd)
2582                 return -ENOMEM;                  1845                 return -ENOMEM;
2583         VM_BUG_ON(pmd_trans_huge(*pmd));         1846         VM_BUG_ON(pmd_trans_huge(*pmd));
2584         do {                                     1847         do {
2585                 next = pmd_addr_end(addr, end    1848                 next = pmd_addr_end(addr, end);
2586                 err = remap_pte_range(mm, pmd, addr, next,           !! 1849                 if (remap_pte_range(mm, pmd, addr, next,
2587                                 pfn + (addr >> PAGE_SHIFT), prot);   !! 1850                                 pfn + (addr >> PAGE_SHIFT), prot))
2588                 if (err)                      !! 1851                         return -ENOMEM;
2589                         return err;           << 
2590         } while (pmd++, addr = next, addr !=     1852         } while (pmd++, addr = next, addr != end);
2591         return 0;                                1853         return 0;
2592 }                                                1854 }
2593                                                  1855 
2594 static inline int remap_pud_range(struct mm_s    1856 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2595                         unsigned long addr, u    1857                         unsigned long addr, unsigned long end,
2596                         unsigned long pfn, pg    1858                         unsigned long pfn, pgprot_t prot)
2597 {                                                1859 {
2598         pud_t *pud;                              1860         pud_t *pud;
2599         unsigned long next;                      1861         unsigned long next;
2600         int err;                              << 
2601                                                  1862 
2602         pfn -= addr >> PAGE_SHIFT;               1863         pfn -= addr >> PAGE_SHIFT;
2603         pud = pud_alloc(mm, p4d, addr);          1864         pud = pud_alloc(mm, p4d, addr);
2604         if (!pud)                                1865         if (!pud)
2605                 return -ENOMEM;                  1866                 return -ENOMEM;
2606         do {                                     1867         do {
2607                 next = pud_addr_end(addr, end    1868                 next = pud_addr_end(addr, end);
2608                 err = remap_pmd_range(mm, pud, addr, next,           !! 1869                 if (remap_pmd_range(mm, pud, addr, next,
2609                                 pfn + (addr >> PAGE_SHIFT), prot);   !! 1870                                 pfn + (addr >> PAGE_SHIFT), prot))
2610                 if (err)                      !! 1871                         return -ENOMEM;
2611                         return err;           << 
2612         } while (pud++, addr = next, addr !=     1872         } while (pud++, addr = next, addr != end);
2613         return 0;                                1873         return 0;
2614 }                                                1874 }
2615                                                  1875 
2616 static inline int remap_p4d_range(struct mm_s    1876 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2617                         unsigned long addr, u    1877                         unsigned long addr, unsigned long end,
2618                         unsigned long pfn, pg    1878                         unsigned long pfn, pgprot_t prot)
2619 {                                                1879 {
2620         p4d_t *p4d;                              1880         p4d_t *p4d;
2621         unsigned long next;                      1881         unsigned long next;
2622         int err;                              << 
2623                                                  1882 
2624         pfn -= addr >> PAGE_SHIFT;               1883         pfn -= addr >> PAGE_SHIFT;
2625         p4d = p4d_alloc(mm, pgd, addr);          1884         p4d = p4d_alloc(mm, pgd, addr);
2626         if (!p4d)                                1885         if (!p4d)
2627                 return -ENOMEM;                  1886                 return -ENOMEM;
2628         do {                                     1887         do {
2629                 next = p4d_addr_end(addr, end    1888                 next = p4d_addr_end(addr, end);
2630                 err = remap_pud_range(mm, p4d, addr, next,           !! 1889                 if (remap_pud_range(mm, p4d, addr, next,
2631                                 pfn + (addr >> PAGE_SHIFT), prot);   !! 1890                                 pfn + (addr >> PAGE_SHIFT), prot))
2632                 if (err)                      !! 1891                         return -ENOMEM;
2633                         return err;           << 
2634         } while (p4d++, addr = next, addr !=     1892         } while (p4d++, addr = next, addr != end);
2635         return 0;                                1893         return 0;
2636 }                                                1894 }
2637                                                  1895 
2638 static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,  !! 1896 /**
2639                 unsigned long pfn, unsigned long size, pgprot_t prot)                !! 1897  * remap_pfn_range - remap kernel memory to userspace
                                                   >> 1898  * @vma: user vma to map to
                                                   >> 1899  * @addr: target user address to start at
                                                   >> 1900  * @pfn: physical address of kernel memory
                                                   >> 1901  * @size: size of map area
                                                   >> 1902  * @prot: page protection flags for this mapping
                                                   >> 1903  *
                                                   >> 1904  *  Note: this is only safe if the mm semaphore is held when called.
                                                   >> 1905  */
                                                   >> 1906 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
                                                   >> 1907                     unsigned long pfn, unsigned long size, pgprot_t prot)
2640 {                                                1908 {
2641         pgd_t *pgd;                              1909         pgd_t *pgd;
2642         unsigned long next;                      1910         unsigned long next;
2643         unsigned long end = addr + PAGE_ALIGN    1911         unsigned long end = addr + PAGE_ALIGN(size);
2644         struct mm_struct *mm = vma->vm_mm;       1912         struct mm_struct *mm = vma->vm_mm;
                                                   >> 1913         unsigned long remap_pfn = pfn;
2645         int err;                                 1914         int err;
2646                                                  1915 
2647         if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))                       <<
2648                 return -EINVAL;               << 
2649                                               << 
2650         /*                                       1916         /*
2651          * Physically remapped pages are spec    1917          * Physically remapped pages are special. Tell the
2652          * rest of the world about it:           1918          * rest of the world about it:
2653          *   VM_IO tells people not to look a    1919          *   VM_IO tells people not to look at these pages
2654          *      (accesses can have side effec    1920          *      (accesses can have side effects).
2655          *   VM_PFNMAP tells the core MM that    1921          *   VM_PFNMAP tells the core MM that the base pages are just
2656          *      raw PFN mappings, and do not     1922          *      raw PFN mappings, and do not have a "struct page" associated
2657          *      with them.                       1923          *      with them.
2658          *   VM_DONTEXPAND                       1924          *   VM_DONTEXPAND
2659          *      Disable vma merging and expan    1925          *      Disable vma merging and expanding with mremap().
2660          *   VM_DONTDUMP                         1926          *   VM_DONTDUMP
2661          *      Omit vma from core dump, even    1927          *      Omit vma from core dump, even when VM_IO turned off.
2662          *                                       1928          *
2663          * There's a horrible special case to    1929          * There's a horrible special case to handle copy-on-write
2664          * behaviour that some programs depen    1930          * behaviour that some programs depend on. We mark the "original"
2665          * un-COW'ed pages by matching them u    1931          * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2666          * See vm_normal_page() for details.     1932          * See vm_normal_page() for details.
2667          */                                      1933          */
2668         if (is_cow_mapping(vma->vm_flags)) {     1934         if (is_cow_mapping(vma->vm_flags)) {
2669                 if (addr != vma->vm_start ||     1935                 if (addr != vma->vm_start || end != vma->vm_end)
2670                         return -EINVAL;          1936                         return -EINVAL;
2671                 vma->vm_pgoff = pfn;             1937                 vma->vm_pgoff = pfn;
2672         }                                        1938         }
2673                                                  1939 
2674         vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);  !! 1940         err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
                                                   >> 1941         if (err)
                                                   >> 1942                 return -EINVAL;
                                                   >> 1943 
                                                   >> 1944         vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
2675                                                  1945 
2676         BUG_ON(addr >= end);                     1946         BUG_ON(addr >= end);
2677         pfn -= addr >> PAGE_SHIFT;               1947         pfn -= addr >> PAGE_SHIFT;
2678         pgd = pgd_offset(mm, addr);              1948         pgd = pgd_offset(mm, addr);
2679         flush_cache_range(vma, addr, end);       1949         flush_cache_range(vma, addr, end);
2680         do {                                     1950         do {
2681                 next = pgd_addr_end(addr, end    1951                 next = pgd_addr_end(addr, end);
2682                 err = remap_p4d_range(mm, pgd    1952                 err = remap_p4d_range(mm, pgd, addr, next,
2683                                 pfn + (addr >    1953                                 pfn + (addr >> PAGE_SHIFT), prot);
2684                 if (err)                         1954                 if (err)
2685                         return err;           !! 1955                         break;
2686         } while (pgd++, addr = next, addr !=     1956         } while (pgd++, addr = next, addr != end);
2687                                                  1957 
2688         return 0;                             << 
2689 }                                             << 
2690                                               << 
2691 /*                                            << 
2692  * Variant of remap_pfn_range that does not call track_pfn_remap.  The caller        <<
2693  * must have pre-validated the caching bits of the pgprot_t.         <<
2694  */                                           <<
2695 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,          <<
2696                 unsigned long pfn, unsigned long size, pgprot_t prot)                <<
2697 {                                             <<
2698         int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);            <<
2699                                               <<
2700         if (!error)                           <<
2701                 return 0;                     <<
2702                                               <<
2703         /*                                    <<
2704          * A partial pfn range mapping is dangerous: it does not      <<
2705          * maintain page reference counts, and callers may free       <<
2706          * pages due to the error. So zap it early as possible.       <<
2707          */                                   <<
2708         zap_page_range_single(vma, addr, size, NULL);                 <<
2709         return error;                         << 
2710 }                                             << 
2711                                               << 
2712 /**                                           << 
2713  * remap_pfn_range - remap kernel memory to userspace                <<
2714  * @vma: user vma to map to                   <<
2715  * @addr: target page aligned user address to start at               <<
2716  * @pfn: page frame number of kernel physical memory address         <<
2717  * @size: size of mapping area                <<
2718  * @prot: page protection flags for this mapping                     <<
2719  *                                            <<
2720  * Note: this is only safe if the mm semaphore is held when called.  <<
2721  *                                            <<
2722  * Return: %0 on success, negative error code otherwise.             <<
2723  */                                           <<
2724 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,  <<
2725                     unsigned long pfn, unsigned long size, pgprot_t prot)            <<
2726 {                                             <<
2727         int err;                              <<
2728                                               <<
2729         err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));              <<
2730         if (err)                                 1958         if (err)
2731                 return -EINVAL;               !! 1959                 untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
2732                                                  1960 
2733         err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);   <<
2734         if (err)                              <<
2735                 untrack_pfn(vma, pfn, PAGE_ALIGN(size), true);       <<
2736         return err;                              1961         return err;
2737 }                                                1962 }
2738 EXPORT_SYMBOL(remap_pfn_range);                  1963 EXPORT_SYMBOL(remap_pfn_range);
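
A hedged sketch of the canonical caller of remap_pfn_range(): a character-device .mmap handler exposing a physical window to userspace. mydev, phys_base and region_size are hypothetical:

    static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct mydev *dev = file->private_data;  /* hypothetical */
            unsigned long size = vma->vm_end - vma->vm_start;

            if (size > dev->region_size)
                    return -EINVAL;

            /* Device memory is commonly mapped uncached. */
            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

            return remap_pfn_range(vma, vma->vm_start,
                                   (dev->phys_base >> PAGE_SHIFT) + vma->vm_pgoff,
                                   size, vma->vm_page_prot);
    }

The mm-semaphore requirement in the kerneldoc is met here automatically, since ->mmap runs with mmap_lock held for write.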
2739                                                  1964 
2740 /**                                              1965 /**
2741  * vm_iomap_memory - remap memory to userspac    1966  * vm_iomap_memory - remap memory to userspace
2742  * @vma: user vma to map to                      1967  * @vma: user vma to map to
2743  * @start: start of the physical memory to be mapped                 !! 1968  * @start: start of area
2744  * @len: size of area                            1969  * @len: size of area
2745  *                                               1970  *
2746  * This is a simplified io_remap_pfn_range()     1971  * This is a simplified io_remap_pfn_range() for common driver use. The
2747  * driver just needs to give us the physical     1972  * driver just needs to give us the physical memory range to be mapped,
2748  * we'll figure out the rest from the vma inf    1973  * we'll figure out the rest from the vma information.
2749  *                                               1974  *
2750  * NOTE! Some drivers might want to tweak vma    1975  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2751  * whatever write-combining details or simila    1976  * whatever write-combining details or similar.
2752  *                                            << 
2753  * Return: %0 on success, negative error code otherwise.             <<
2754  */                                              1977  */
2755 int vm_iomap_memory(struct vm_area_struct *vm    1978 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2756 {                                                1979 {
2757         unsigned long vm_len, pfn, pages;        1980         unsigned long vm_len, pfn, pages;
2758                                                  1981 
2759         /* Check that the physical memory are    1982         /* Check that the physical memory area passed in looks valid */
2760         if (start + len < start)                 1983         if (start + len < start)
2761                 return -EINVAL;                  1984                 return -EINVAL;
2762         /*                                       1985         /*
2763          * You *really* shouldn't map things     1986          * You *really* shouldn't map things that aren't page-aligned,
2764          * but we've historically allowed it     1987          * but we've historically allowed it because IO memory might
2765          * just have smaller alignment.          1988          * just have smaller alignment.
2766          */                                      1989          */
2767         len += start & ~PAGE_MASK;               1990         len += start & ~PAGE_MASK;
2768         pfn = start >> PAGE_SHIFT;               1991         pfn = start >> PAGE_SHIFT;
2769         pages = (len + ~PAGE_MASK) >> PAGE_SH    1992         pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2770         if (pfn + pages < pfn)                   1993         if (pfn + pages < pfn)
2771                 return -EINVAL;                  1994                 return -EINVAL;
2772                                                  1995 
2773         /* We start the mapping 'vm_pgoff' pa    1996         /* We start the mapping 'vm_pgoff' pages into the area */
2774         if (vma->vm_pgoff > pages)               1997         if (vma->vm_pgoff > pages)
2775                 return -EINVAL;                  1998                 return -EINVAL;
2776         pfn += vma->vm_pgoff;                    1999         pfn += vma->vm_pgoff;
2777         pages -= vma->vm_pgoff;                  2000         pages -= vma->vm_pgoff;
2778                                                  2001 
2779         /* Can we fit all of the mapping? */     2002         /* Can we fit all of the mapping? */
2780         vm_len = vma->vm_end - vma->vm_start;    2003         vm_len = vma->vm_end - vma->vm_start;
2781         if (vm_len >> PAGE_SHIFT > pages)        2004         if (vm_len >> PAGE_SHIFT > pages)
2782                 return -EINVAL;                  2005                 return -EINVAL;
2783                                                  2006 
2784         /* Ok, let it rip */                     2007         /* Ok, let it rip */
2785         return io_remap_pfn_range(vma, vma->v    2008         return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2786 }                                                2009 }
2787 EXPORT_SYMBOL(vm_iomap_memory);                  2010 EXPORT_SYMBOL(vm_iomap_memory);
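
With vm_iomap_memory() the same handler collapses to a single call; a hedged sketch under the same hypothetical names as above:

    static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct mydev *dev = file->private_data;  /* hypothetical */

            /* pfn derivation, length checks and vm_pgoff handling come from the vma. */
            return vm_iomap_memory(vma, dev->phys_base, dev->region_size);
    }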
2788                                                  2011 
2789 static int apply_to_pte_range(struct mm_struc    2012 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2790                                      unsigned    2013                                      unsigned long addr, unsigned long end,
2791                                      pte_fn_t fn, void *data, bool create,           !! 2014                                      pte_fn_t fn, void *data)
2792                                      pgtbl_mod_mask *mask)            <<
2793 {                                                2015 {
2794         pte_t *pte, *mapped_pte;              !! 2016         pte_t *pte;
2795         int err = 0;                          !! 2017         int err;
2796         spinlock_t *ptl;                      !! 2018         pgtable_t token;
                                                   >> 2019         spinlock_t *uninitialized_var(ptl);
2797                                                  2020 
2798         if (create) {                         !! 2021         pte = (mm == &init_mm) ?
2799                 mapped_pte = pte = (mm == &init_mm) ?                !! 2022                 pte_alloc_kernel(pmd, addr) :
2800                         pte_alloc_kernel_track(pmd, addr, mask) :    !! 2023                 pte_alloc_map_lock(mm, pmd, addr, &ptl);
2801                         pte_alloc_map_lock(mm, pmd, addr, &ptl);     !! 2024         if (!pte)
2802                 if (!pte)                     !! 2025                 return -ENOMEM;
2803                         return -ENOMEM;       !! 2026 
2804         } else {                              !! 2027         BUG_ON(pmd_huge(*pmd));
2805                 mapped_pte = pte = (mm == &init_mm) ?                <<
2806                         pte_offset_kernel(pmd, addr) :               <<
2807                         pte_offset_map_lock(mm, pmd, addr, &ptl);    <<
2808                 if (!pte)                     << 
2809                         return -EINVAL;       << 
2810         }                                     << 
2811                                                  2028 
2812         arch_enter_lazy_mmu_mode();              2029         arch_enter_lazy_mmu_mode();
2813                                                  2030 
2814         if (fn) {                             !! 2031         token = pmd_pgtable(*pmd);
2815                 do {                          !! 2032 
2816                         if (create || !pte_none(ptep_get(pte))) {    !! 2033         do {
2817                                 err = fn(pte++, addr, data);         !! 2034                 err = fn(pte++, token, addr, data);
2818                                 if (err)      !! 2035                 if (err)
2819                                         break;                       !! 2036                         break;
2820                         }                     !! 2037         } while (addr += PAGE_SIZE, addr != end);
2821                 } while (addr += PAGE_SIZE, addr != end);            <<
2822         }                                     << 
2823         *mask |= PGTBL_PTE_MODIFIED;          << 
2824                                                  2038 
2825         arch_leave_lazy_mmu_mode();              2039         arch_leave_lazy_mmu_mode();
2826                                                  2040 
2827         if (mm != &init_mm)                      2041         if (mm != &init_mm)
2828                 pte_unmap_unlock(mapped_pte, ptl);                   !! 2042                 pte_unmap_unlock(pte-1, ptl);
2829         return err;                              2043         return err;
2830 }                                                2044 }
2831                                                  2045 
2832 static int apply_to_pmd_range(struct mm_struc    2046 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2833                                      unsigned    2047                                      unsigned long addr, unsigned long end,
2834                                      pte_fn_t fn, void *data, bool create,           !! 2048                                      pte_fn_t fn, void *data)
2835                                      pgtbl_mod_mask *mask)            <<
2836 {                                                2049 {
2837         pmd_t *pmd;                              2050         pmd_t *pmd;
2838         unsigned long next;                      2051         unsigned long next;
2839         int err = 0;                          !! 2052         int err;
2840                                                  2053 
2841         BUG_ON(pud_leaf(*pud));               !! 2054         BUG_ON(pud_huge(*pud));
2842                                                  2055 
2843         if (create) {                         !! 2056         pmd = pmd_alloc(mm, pud, addr);
2844                 pmd = pmd_alloc_track(mm, pud, addr, mask);          !! 2057         if (!pmd)
2845                 if (!pmd)                     !! 2058                 return -ENOMEM;
2846                         return -ENOMEM;       << 
2847         } else {                              << 
2848                 pmd = pmd_offset(pud, addr);  << 
2849         }                                     << 
2850         do {                                     2059         do {
2851                 next = pmd_addr_end(addr, end    2060                 next = pmd_addr_end(addr, end);
2852                 if (pmd_none(*pmd) && !create)                       !! 2061                 err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
2853                         continue;             <<
2854                 if (WARN_ON_ONCE(pmd_leaf(*pmd)))                    <<
2855                         return -EINVAL;       <<
2856                 if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {                <<
2857                         if (!create)          <<
2858                                 continue;     <<
2859                         pmd_clear_bad(pmd);   <<
2860                 }                             <<
2861                 err = apply_to_pte_range(mm, pmd, addr, next,        <<
2862                                          fn, data, create, mask);    <<
2863                 if (err)                         2062                 if (err)
2864                         break;                   2063                         break;
2865         } while (pmd++, addr = next, addr !=     2064         } while (pmd++, addr = next, addr != end);
2866                                               << 
2867         return err;                              2065         return err;
2868 }                                                2066 }
2869                                                  2067 
2870 static int apply_to_pud_range(struct mm_struc    2068 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2871                                      unsigned    2069                                      unsigned long addr, unsigned long end,
2872                                      pte_fn_t fn, void *data, bool create,           !! 2070                                      pte_fn_t fn, void *data)
2873                                      pgtbl_mod_mask *mask)            <<
2874 {                                                2071 {
2875         pud_t *pud;                              2072         pud_t *pud;
2876         unsigned long next;                      2073         unsigned long next;
2877         int err = 0;                          !! 2074         int err;
2878                                                  2075 
2879         if (create) {                         !! 2076         pud = pud_alloc(mm, p4d, addr);
2880                 pud = pud_alloc_track(mm, p4d, addr, mask);          !! 2077         if (!pud)
2881                 if (!pud)                     !! 2078                 return -ENOMEM;
2882                         return -ENOMEM;       << 
2883         } else {                              << 
2884                 pud = pud_offset(p4d, addr);  << 
2885         }                                     << 
2886         do {                                     2079         do {
2887                 next = pud_addr_end(addr, end    2080                 next = pud_addr_end(addr, end);
2888                 if (pud_none(*pud) && !create)                       !! 2081                 err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
2889                         continue;             <<
2890                 if (WARN_ON_ONCE(pud_leaf(*pud)))                    <<
2891                         return -EINVAL;       <<
2892                 if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {                <<
2893                         if (!create)          <<
2894                                 continue;     <<
2895                         pud_clear_bad(pud);   <<
2896                 }                             <<
2897                 err = apply_to_pmd_range(mm, pud, addr, next,        <<
2898                                          fn, data, create, mask);    <<
2899                 if (err)                         2082                 if (err)
2900                         break;                   2083                         break;
2901         } while (pud++, addr = next, addr !=     2084         } while (pud++, addr = next, addr != end);
2902                                               << 
2903         return err;                              2085         return err;
2904 }                                                2086 }
2905                                                  2087 
2906 static int apply_to_p4d_range(struct mm_struc    2088 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2907                                      unsigned    2089                                      unsigned long addr, unsigned long end,
2908                                      pte_fn_t fn, void *data, bool create,           !! 2090                                      pte_fn_t fn, void *data)
2909                                      pgtbl_mod_mask *mask)            <<
2910 {                                                2091 {
2911         p4d_t *p4d;                              2092         p4d_t *p4d;
2912         unsigned long next;                      2093         unsigned long next;
2913         int err = 0;                          !! 2094         int err;
2914                                                  2095 
2915         if (create) {                         !! 2096         p4d = p4d_alloc(mm, pgd, addr);
2916                 p4d = p4d_alloc_track(mm, pgd, addr, mask);          !! 2097         if (!p4d)
2917                 if (!p4d)                     !! 2098                 return -ENOMEM;
2918                         return -ENOMEM;       << 
2919         } else {                              << 
2920                 p4d = p4d_offset(pgd, addr);  << 
2921         }                                     << 
2922         do {                                     2099         do {
2923                 next = p4d_addr_end(addr, end    2100                 next = p4d_addr_end(addr, end);
2924                 if (p4d_none(*p4d) && !create)                       !! 2101                 err = apply_to_pud_range(mm, p4d, addr, next, fn, data);
2925                         continue;             <<
2926                 if (WARN_ON_ONCE(p4d_leaf(*p4d)))                    <<
2927                         return -EINVAL;       <<
2928                 if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {                <<
2929                         if (!create)          <<
2930                                 continue;     <<
2931                         p4d_clear_bad(p4d);   <<
2932                 }                             <<
2933                 err = apply_to_pud_range(mm, p4d, addr, next,        <<
2934                                          fn, data, create, mask);    <<
2935                 if (err)                         2102                 if (err)
2936                         break;                   2103                         break;
2937         } while (p4d++, addr = next, addr !=     2104         } while (p4d++, addr = next, addr != end);
2938                                               << 
2939         return err;                              2105         return err;
2940 }                                                2106 }
2941                                                  2107 
2942 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,           !! 2108 /*
2943                                  unsigned long size, pte_fn_t fn,    !! 2109  * Scan a region of virtual memory, filling in page tables as necessary
2944                                  void *data, bool create)            !! 2110  * and calling a provided function on each leaf page table.
                                                   >> 2111  */
                                                   >> 2112 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
                                                   >> 2113                         unsigned long size, pte_fn_t fn, void *data)
2945 {                                                2114 {
2946         pgd_t *pgd;                              2115         pgd_t *pgd;
2947         unsigned long start = addr, next;     !! 2116         unsigned long next;
2948         unsigned long end = addr + size;         2117         unsigned long end = addr + size;
2949         pgtbl_mod_mask mask = 0;              !! 2118         int err;
2950         int err = 0;                          << 
2951                                                  2119 
2952         if (WARN_ON(addr >= end))                2120         if (WARN_ON(addr >= end))
2953                 return -EINVAL;                  2121                 return -EINVAL;
2954                                                  2122 
2955         pgd = pgd_offset(mm, addr);              2123         pgd = pgd_offset(mm, addr);
2956         do {                                     2124         do {
2957                 next = pgd_addr_end(addr, end    2125                 next = pgd_addr_end(addr, end);
2958                 if (pgd_none(*pgd) && !create)                       !! 2126                 err = apply_to_p4d_range(mm, pgd, addr, next, fn, data);
2959                         continue;             <<
2960                 if (WARN_ON_ONCE(pgd_leaf(*pgd)))                    <<
2961                         return -EINVAL;       <<
2962                 if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {                <<
2963                         if (!create)          <<
2964                                 continue;     <<
2965                         pgd_clear_bad(pgd);   <<
2966                 }                             <<
2967                 err = apply_to_p4d_range(mm, pgd, addr, next,        <<
2968                                          fn, data, create, &mask);   <<
2969                 if (err)                         2127                 if (err)
2970                         break;                   2128                         break;
2971         } while (pgd++, addr = next, addr !=     2129         } while (pgd++, addr = next, addr != end);
2972                                                  2130 
2973         if (mask & ARCH_PAGE_TABLE_SYNC_MASK)                        <<
2974                 arch_sync_kernel_mappings(start, start + size);      <<
2975                                               << 
2976         return err;                              2131         return err;
2977 }                                                2132 }
2978                                               << 
2979 /*                                            << 
2980  * Scan a region of virtual memory, filling in page tables as necessary              <<
2981  * and calling a provided function on each leaf page table.          <<
2982  */                                           <<
2983 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,    <<
2984                         unsigned long size, pte_fn_t fn, void *data)                 <<
2985 {                                             <<
2986         return __apply_to_page_range(mm, addr, size, fn, data, true);                <<
2987 }                                             << 
2988 EXPORT_SYMBOL_GPL(apply_to_page_range);          2133 EXPORT_SYMBOL_GPL(apply_to_page_range);
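
To make the pte_fn_t contract concrete, a hedged sketch of a trivial callback using the current three-argument prototype (pte, address, opaque data):

    /* Counts the leaf ptes visited; 'data' carries the accumulator. */
    static int count_pte(pte_t *pte, unsigned long addr, void *data)
    {
            unsigned long *count = data;

            (*count)++;
            return 0;  /* a non-zero return aborts the walk */
    }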
2989                                                  2134 
2990 /*                                               2135 /*
2991  * Scan a region of virtual memory, calling a provided function on   <<
2992  * each leaf page table where it exists.      <<
2993  *                                            <<
2994  * Unlike apply_to_page_range, this does _not_ fill in page tables   <<
2995  * where they are absent.                     <<
2996  */                                           <<
2997 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,           <<
2998                                  unsigned long size, pte_fn_t fn, void *data)        <<
2999 {                                             <<
3000         return __apply_to_page_range(mm, addr, size, fn, data, false);               <<
3001 }                                             <<
3002 EXPORT_SYMBOL_GPL(apply_to_existing_page_range);                     <<
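
The two entry points differ only in whether missing page tables are allocated; a hedged fragment reusing the count_pte() callback sketched above, over some kernel range [addr, addr + size):

    unsigned long count = 0;
    int err;

    err = apply_to_page_range(&init_mm, addr, size, count_pte, &count);
    if (!err)  /* the variant below walks only tables that already exist */
            err = apply_to_existing_page_range(&init_mm, addr, size,
                                               count_pte, &count);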
3003                                               << 
3004 /*                                            << 
3005  * handle_pte_fault chooses page fault handle    2136  * handle_pte_fault chooses page fault handler according to an entry which was
3006  * read non-atomically.  Before making any co    2137  * read non-atomically.  Before making any commitment, on those architectures
3007  * or configurations (e.g. i386 with PAE) whi    2138  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
3008  * parts, do_swap_page must check under lock     2139  * parts, do_swap_page must check under lock before unmapping the pte and
3009  * proceeding (but do_wp_page is only called     2140  * proceeding (but do_wp_page is only called after already making such a check;
3010  * and do_anonymous_page can safely check lat    2141  * and do_anonymous_page can safely check later on).
3011  */                                              2142  */
3012 static inline int pte_unmap_same(struct vm_fault *vmf)               !! 2143 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
                                                   >> 2144                                 pte_t *page_table, pte_t orig_pte)
3013 {                                                2145 {
3014         int same = 1;                            2146         int same = 1;
3015 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)                !! 2147 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
3016         if (sizeof(pte_t) > sizeof(unsigned long)) {                    2148         if (sizeof(pte_t) > sizeof(unsigned long)) {
3017                 spin_lock(vmf->ptl);          !! 2149                 spinlock_t *ptl = pte_lockptr(mm, pmd);
3018                 same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);  !! 2150                 spin_lock(ptl);
3019                 spin_unlock(vmf->ptl);        !! 2151                 same = pte_same(*page_table, orig_pte);
                                                   >> 2152                 spin_unlock(ptl);
3020         }                                        2153         }
3021 #endif                                           2154 #endif
3022         pte_unmap(vmf->pte);                  !! 2155         pte_unmap(page_table);
3023         vmf->pte = NULL;                      << 
3024         return same;                             2156         return same;
3025 }                                                2157 }
3026                                                  2158 
3027 /*                                            !! 2159 static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
3028  * Return:                                    << 
3029  *      0:              copied succeeded      << 
3030  *      -EHWPOISON:     copy failed due to hwpoison in source page   <<
3031  *      -EAGAIN:        copied failed (some other reason)            <<
3032  */                                           <<
3033 static inline int __wp_page_copy_user(struct page *dst, struct page *src,            <<
3034                                       struct vm_fault *vmf)          <<
3035 {                                                2160 {
3036         int ret;                              !! 2161         debug_dma_assert_idle(src);
3037         void *kaddr;                          <<
3038         void __user *uaddr;                   <<
3039         struct vm_area_struct *vma = vmf->vma;                       <<
3040         struct mm_struct *mm = vma->vm_mm;    <<
3041         unsigned long addr = vmf->address;    <<
3042                                               <<
3043         if (likely(src)) {                    <<
3044                 if (copy_mc_user_highpage(dst, src, addr, vma))      <<
3045                         return -EHWPOISON;    << 
3046                 return 0;                     << 
3047         }                                     << 
3048                                                  2162 
3049         /*                                       2163         /*
3050          * If the source page was a PFN mappi    2164          * If the source page was a PFN mapping, we don't have
3051          * a "struct page" for it. We do a be    2165          * a "struct page" for it. We do a best-effort copy by
3052          * just copying from the original use    2166          * just copying from the original user address. If that
3053          * fails, we just zero-fill it. Live     2167          * fails, we just zero-fill it. Live with it.
3054          */                                      2168          */
3055         kaddr = kmap_local_page(dst);         !! 2169         if (unlikely(!src)) {
3056         pagefault_disable();                  !! 2170                 void *kaddr = kmap_atomic(dst);
3057         uaddr = (void __user *)(addr & PAGE_MASK);                   !! 2171                 void __user *uaddr = (void __user *)(va & PAGE_MASK);
3058                                               << 
3059         /*                                    << 
3060          * On architectures with software "accessed" bits, we would  <<
3061          * take a double page fault, so mark it accessed here.       <<
3062          */                                   <<
3063         vmf->pte = NULL;                      <<
3064         if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {                 <<
3065                 pte_t entry;                  <<
3066                                               <<
3067                 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);       <<
3068                 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {   <<
3069                         /*                    <<
3070                          * Other thread has already handled the fault                <<
3071                          * and update local tlb only  <<
3072                          */                   <<
3073                         if (vmf->pte)         <<
3074                                 update_mmu_tlb(vma, addr, vmf->pte); <<
3075                         ret = -EAGAIN;        <<
3076                         goto pte_unlock;      <<
3077                 }                             <<
3078                                               <<
3079                 entry = pte_mkyoung(vmf->orig_pte);                  <<
3080                 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))            <<
3081                         update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);         <<
3082         }                                     << 
3083                                               << 
3084         /*                                    << 
3085          * This really shouldn't fail, because the page is there     <<
3086          * in the page tables. But it might just be unreadable,      <<
3087          * in which case we just give up and fill the result with    <<
3088          * zeroes.                            <<
3089          */                                   <<
3090         if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {    <<
3091                 if (vmf->pte)                 <<
3092                         goto warn;            <<
3093                                               <<
3094                 /* Re-validate under PTL if the page is still mapped */              <<
3095                 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);       <<
3096                 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {   <<
3097                         /* The PTE changed under us, update local tlb */             <<
3098                         if (vmf->pte)         <<
3099                                 update_mmu_tlb(vma, addr, vmf->pte); <<
3100                         ret = -EAGAIN;        << 
3101                         goto pte_unlock;      << 
3102                 }                             << 
3103                                                  2172 
3104                 /*                               2173                 /*
3105                  * The same page can be mapped back since last copy attempt.         !! 2174                  * This really shouldn't fail, because the page is there
3106                  * Try to copy again under PTL.                      !! 2175                  * in the page tables. But it might just be unreadable,
                                                   >> 2176                  * in which case we just give up and fill the result with
                                                   >> 2177                  * zeroes.
3107                  */                              2178                  */
3108                 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {            !! 2179                 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
3109                         /*                    <<
3110                          * Give a warn in case there can be some obscure             <<
3111                          * use-case           <<
3112                          */                   << 
3113 warn:                                         << 
3114                         WARN_ON_ONCE(1);      << 
3115                         clear_page(kaddr);       2180                         clear_page(kaddr);
3116                 }                             !! 2181                 kunmap_atomic(kaddr);
3117         }                                     !! 2182                 flush_dcache_page(dst);
3118                                               !! 2183         } else
3119         ret = 0;                              !! 2184                 copy_user_highpage(dst, src, va, vma);
3120                                               << 
3121 pte_unlock:                                   << 
3122         if (vmf->pte)                         << 
3123                 pte_unmap_unlock(vmf->pte, vm << 
3124         pagefault_enable();                   << 
3125         kunmap_local(kaddr);                  << 
3126         flush_dcache_page(dst);               << 
3127                                               << 
3128         return ret;                           << 
3129 }                                                2185 }
3130                                                  2186 
3131 static gfp_t __get_fault_gfp_mask(struct vm_a    2187 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
3132 {                                                2188 {
3133         struct file *vm_file = vma->vm_file;     2189         struct file *vm_file = vma->vm_file;
3134                                                  2190 
3135         if (vm_file)                             2191         if (vm_file)
3136                 return mapping_gfp_mask(vm_fi    2192                 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
3137                                                  2193 
3138         /*                                       2194         /*
3139          * Special mappings (e.g. VDSO) do no    2195          * Special mappings (e.g. VDSO) do not have any file so fake
3140          * a default GFP_KERNEL for them.        2196          * a default GFP_KERNEL for them.
3141          */                                      2197          */
3142         return GFP_KERNEL;                       2198         return GFP_KERNEL;
3143 }                                                2199 }
3144                                                  2200 
3145 /*                                               2201 /*
3146  * Notify the address space that the page is     2202  * Notify the address space that the page is about to become writable so that
3147  * it can prohibit this or wait for the page     2203  * it can prohibit this or wait for the page to get into an appropriate state.
3148  *                                               2204  *
3149  * We do this without the lock held, so that     2205  * We do this without the lock held, so that it can sleep if it needs to.
3150  */                                              2206  */
3151 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)         !! 2207 static int do_page_mkwrite(struct vm_fault *vmf)
3152 {                                                2208 {
3153         vm_fault_t ret;                       !! 2209         int ret;
                                                   >> 2210         struct page *page = vmf->page;
3154         unsigned int old_flags = vmf->flags;     2211         unsigned int old_flags = vmf->flags;
3155                                                  2212 
3156         vmf->flags = FAULT_FLAG_WRITE|FAULT_F    2213         vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
3157                                                  2214 
3158         if (vmf->vma->vm_file &&              << 
3159             IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) <<
3160                 return VM_FAULT_SIGBUS;       << 
3161                                               << 
3162         ret = vmf->vma->vm_ops->page_mkwrite(    2215         ret = vmf->vma->vm_ops->page_mkwrite(vmf);
3163         /* Restore original flags so that cal    2216         /* Restore original flags so that caller is not surprised */
3164         vmf->flags = old_flags;                  2217         vmf->flags = old_flags;
3165         if (unlikely(ret & (VM_FAULT_ERROR |     2218         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
3166                 return ret;                      2219                 return ret;
3167         if (unlikely(!(ret & VM_FAULT_LOCKED)    2220         if (unlikely(!(ret & VM_FAULT_LOCKED))) {
3168                 folio_lock(folio);            !! 2221                 lock_page(page);
3169                 if (!folio->mapping) {        !! 2222                 if (!page->mapping) {
3170                         folio_unlock(folio);  !! 2223                         unlock_page(page);
3171                         return 0; /* retry */    2224                         return 0; /* retry */
3172                 }                                2225                 }
3173                 ret |= VM_FAULT_LOCKED;          2226                 ret |= VM_FAULT_LOCKED;
3174         } else                                   2227         } else
3175                 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); !! 2228                 VM_BUG_ON_PAGE(!PageLocked(page), page);
3176         return ret;                              2229         return ret;
3177 }                                                2230 }
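
The contract this helper enforces is easiest to see from the other side. A
minimal, hypothetical ->page_mkwrite implementation (a real filesystem would
also call sb_start_pagefault()/sb_end_pagefault() and reserve blocks):

	#include <linux/fs.h>
	#include <linux/mm.h>
	#include <linux/pagemap.h>

	static vm_fault_t my_fs_page_mkwrite(struct vm_fault *vmf)
	{
		struct folio *folio = page_folio(vmf->page);
		struct inode *inode = file_inode(vmf->vma->vm_file);

		folio_lock(folio);
		/* Raced with truncate?  Have the caller retry the fault. */
		if (folio->mapping != inode->i_mapping) {
			folio_unlock(folio);
			return VM_FAULT_NOPAGE;
		}
		/* ...prepare the folio for writing here... */
		return VM_FAULT_LOCKED;	/* folio stays locked, as checked above */
	}
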
3178                                                  2231 
3179 /*                                               2232 /*
3180  * Handle dirtying of a page in shared file m    2233  * Handle dirtying of a page in shared file mapping on a write fault.
3181  *                                               2234  *
3182  * The function expects the page to be locked    2235  * The function expects the page to be locked and unlocks it.
3183  */                                              2236  */
3184 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) !! 2237 static void fault_dirty_shared_page(struct vm_area_struct *vma,
                                                   >> 2238                                     struct page *page)
3185 {                                                2239 {
3186         struct vm_area_struct *vma = vmf->vma; <<
3187         struct address_space *mapping;           2240         struct address_space *mapping;
3188         struct folio *folio = page_folio(vmf->page); <<
3189         bool dirtied;                            2241         bool dirtied;
3190         bool page_mkwrite = vma->vm_ops && vm    2242         bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
3191                                                  2243 
3192         dirtied = folio_mark_dirty(folio);    !! 2244         dirtied = set_page_dirty(page);
3193         VM_BUG_ON_FOLIO(folio_test_anon(folio), folio); !! 2245         VM_BUG_ON_PAGE(PageAnon(page), page);
3194         /*                                       2246         /*
3195          * Take a local copy of the address_space - folio.mapping may be zeroed !! 2247          * Take a local copy of the address_space - page.mapping may be zeroed
3196          * by truncate after folio_unlock().   The address_space itself remains !! 2248          * by truncate after unlock_page().   The address_space itself remains
3197          * pinned by vma->vm_file's reference.  We rely on folio_unlock()'s !! 2249          * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
3198          * release semantics to prevent the c    2250          * release semantics to prevent the compiler from undoing this copying.
3199          */                                      2251          */
3200         mapping = folio_raw_mapping(folio);   !! 2252         mapping = page_rmapping(page);
3201         folio_unlock(folio);                  !! 2253         unlock_page(page);
3202                                                  2254 
3203         if (!page_mkwrite)                    << 
3204                 file_update_time(vma->vm_file); <<
3205                                               <<
3206         /*                                    <<
3207          * Throttle page dirtying rate down to writeback speed. <<
3208          *                                    <<
3209          * mapping may be NULL here because some device drivers do not <<
3210          * set page.mapping but still dirty their pages <<
3211          *                                    <<
3212          * Drop the mmap_lock before waiting on IO, if we can. The file <<
3213          * is pinning the mapping, as per above. <<
3214          */                                   <<
3215         if ((dirtied || page_mkwrite) && mapping) {    2255         if ((dirtied || page_mkwrite) && mapping) {
3216                 struct file *fpin;            !! 2256                 /*
3217                                               !! 2257                  * Some device drivers do not set page.mapping
3218                 fpin = maybe_unlock_mmap_for_io(vmf, NULL); !! 2258                  * but still dirty their pages
                                                   >> 2259                  */
3219                 balance_dirty_pages_ratelimited(mapping);    2260                 balance_dirty_pages_ratelimited(mapping);
3220                 if (fpin) {                   <<
3221                         fput(fpin);           <<
3222                         return VM_FAULT_COMPLETED; <<
3223                 }                             << 
3224         }                                        2261         }
3225                                                  2262 
3226         return 0;                             !! 2263         if (!page_mkwrite)
                                                   >> 2264                 file_update_time(vma->vm_file);
3227 }                                                2265 }
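
Seen from user space, this function sits behind every first write to a clean
page of a MAP_SHARED file mapping: the page is dirtied, the file time is
updated, and the writer may be throttled. A small runnable demonstration
(shared.bin is a scratch file; error handling trimmed):

	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("shared.bin", O_RDWR | O_CREAT, 0600);

		if (fd < 0 || ftruncate(fd, 4096) != 0)
			return 1;
		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			return 1;
		p[0] = 'x';		/* write fault: page dirtied, mtime updated */
		msync(p, 4096, MS_SYNC);	/* force writeback of the dirty page */
		return 0;
	}
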
3228                                                  2266 
3229 /*                                               2267 /*
3230  * Handle write page faults for pages that ca    2268  * Handle write page faults for pages that can be reused in the current vma
3231  *                                               2269  *
3232  * This can happen either due to the mapping     2270  * This can happen either due to the mapping being with the VM_SHARED flag,
3233  * or due to us being the last reference stan    2271  * or due to us being the last reference standing to the page. In either
3234  * case, all we need to do here is to mark th    2272  * case, all we need to do here is to mark the page as writable and update
3235  * any related book-keeping.                     2273  * any related book-keeping.
3236  */                                              2274  */
3237 static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio) !! 2275 static inline void wp_page_reuse(struct vm_fault *vmf)
3238         __releases(vmf->ptl)                     2276         __releases(vmf->ptl)
3239 {                                                2277 {
3240         struct vm_area_struct *vma = vmf->vma    2278         struct vm_area_struct *vma = vmf->vma;
                                                   >> 2279         struct page *page = vmf->page;
3241         pte_t entry;                             2280         pte_t entry;
3242                                               !! 2281         /*
3243         VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE)); !! 2282          * Clear the pages cpupid information as the existing
3244         VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->orig_pte))); !! 2283          * information potentially belongs to a now completely
3245                                               !! 2284          * unrelated process.
3246         if (folio) {                          !! 2285          */
3247                 VM_BUG_ON(folio_test_anon(folio) && !! 2286         if (page)
3248                           !PageAnonExclusive(vmf->page)); !! 2287                 page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
3249                 /*                            <<
3250                  * Clear the folio's cpupid information as the existing <<
3251                  * information potentially belongs to a now completely <<
3252                  * unrelated process.         <<
3253                  */                           <<
3254                 folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1); <<
3255         }                                     << 
3256                                                  2288 
3257         flush_cache_page(vma, vmf->address, p    2289         flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3258         entry = pte_mkyoung(vmf->orig_pte);      2290         entry = pte_mkyoung(vmf->orig_pte);
3259         entry = maybe_mkwrite(pte_mkdirty(ent    2291         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3260         if (ptep_set_access_flags(vma, vmf->a    2292         if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3261                 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); !! 2293                 update_mmu_cache(vma, vmf->address, vmf->pte);
3262         pte_unmap_unlock(vmf->pte, vmf->ptl);    2294         pte_unmap_unlock(vmf->pte, vmf->ptl);
3263         count_vm_event(PGREUSE);              << 
3264 }                                                2295 }
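
The maybe_mkwrite() used above only sets the hardware write bit when the VMA
actually has VM_WRITE. Paraphrased for illustration only (the real helper is
in include/linux/mm.h; note that recent kernels pass the VMA to
pte_mkwrite()):

	#include <linux/mm.h>

	static inline pte_t maybe_mkwrite_sketch(pte_t pte,
						 struct vm_area_struct *vma)
	{
		if (likely(vma->vm_flags & VM_WRITE))
			pte = pte_mkwrite(pte, vma);
		return pte;
	}
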
3265                                                  2296 
3266 /*                                               2297 /*
3267  * We could add a bitflag somewhere, but for now, we know that all !! 2298  * Handle the case of a page which we actually need to copy to a new page.
3268  * vm_ops that have a ->map_pages have been audited and don't need <<
3269  * the mmap_lock to be held.                  <<
3270  */                                           <<
3271 static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf) <<
3272 {                                             <<
3273         struct vm_area_struct *vma = vmf->vma; <<
3274                                               <<
3275         if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK)) <<
3276                 return 0;                     << 
3277         vma_end_read(vma);                    << 
3278         return VM_FAULT_RETRY;                << 
3279 }                                             << 
3280                                               << 
3281 /**                                           << 
3282  * __vmf_anon_prepare - Prepare to handle an anonymous fault. <<
3283  * @vmf: The vm_fault descriptor passed from the fault handler. <<
3284  *                                               2299  *
3285  * When preparing to insert an anonymous page into a VMA from a !! 2300  * Called with mmap_sem locked and the old page referenced, but
3286  * fault handler, call this function rather than anon_vma_prepare(). <<
3287  * If this vma does not already have an associated anon_vma and we are <<
3288  * only protected by the per-VMA lock, the caller must retry with the <<
3289  * mmap_lock held.  __anon_vma_prepare() will look at adjacent VMAs to <<
3290  * determine if this VMA can share its anon_vma, and that's not safe to <<
3291  * do with only the per-VMA lock held for this VMA. <<
3292  *                                            <<
3293  * Return: 0 if fault handling can proceed.  Any other value should be <<
3294  * returned to the caller.                    << 
3295  */                                           << 
3296 vm_fault_t __vmf_anon_prepare(struct vm_fault << 
3297 {                                             << 
3298         struct vm_area_struct *vma = vmf->vma; <<
3299         vm_fault_t ret = 0;                   << 
3300                                               << 
3301         if (likely(vma->anon_vma))            << 
3302                 return 0;                     << 
3303         if (vmf->flags & FAULT_FLAG_VMA_LOCK) { <<
3304                 if (!mmap_read_trylock(vma->vm_mm)) <<
3305                         return VM_FAULT_RETRY; <<
3306         }                                     << 
3307         if (__anon_vma_prepare(vma))          << 
3308                 ret = VM_FAULT_OOM;           << 
3309         if (vmf->flags & FAULT_FLAG_VMA_LOCK) << 
3310                 mmap_read_unlock(vma->vm_mm); << 
3311         return ret;                           << 
3312 }                                             << 
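
Callers reach this through the vmf_anon_prepare() wrapper declared in
mm/internal.h. A sketch of the expected calling pattern (my_anon_fault is
hypothetical):

	#include <linux/mm.h>

	static vm_fault_t my_anon_fault(struct vm_fault *vmf)
	{
		vm_fault_t ret = vmf_anon_prepare(vmf);

		if (unlikely(ret))
			return ret;	/* VM_FAULT_RETRY or VM_FAULT_OOM */
		/* ...safe to allocate and map an anonymous folio here... */
		return 0;
	}
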
3313                                               << 
3314 /*                                            << 
3315  * Handle the case of a page which we actually need to copy to a new page, <<
3316  * either due to COW or unsharing.            << 
3317  *                                            << 
3318  * Called with mmap_lock locked and the old page referenced, but !! 2301  * without the ptl held.
3319  * without the ptl held.                         2301  * without the ptl held.
3320  *                                               2302  *
3321  * High level logic flow:                        2303  * High level logic flow:
3322  *                                               2304  *
3323  * - Allocate a page, copy the content of the    2305  * - Allocate a page, copy the content of the old page to the new one.
3324  * - Handle book keeping and accounting - cgr    2306  * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
3325  * - Take the PTL. If the pte changed, bail o    2307  * - Take the PTL. If the pte changed, bail out and release the allocated page
3326  * - If the pte is still the way we remember     2308  * - If the pte is still the way we remember it, update the page table and all
3327  *   relevant references. This includes dropp    2309  *   relevant references. This includes dropping the reference the page-table
3328  *   held to the old page, as well as updatin    2310  *   held to the old page, as well as updating the rmap.
3329  * - In any case, unlock the PTL and drop the    2311  * - In any case, unlock the PTL and drop the reference we took to the old page.
3330  */                                              2312  */
3331 static vm_fault_t wp_page_copy(struct vm_fault *vmf) !! 2313 static int wp_page_copy(struct vm_fault *vmf)
3332 {                                                2314 {
3333         const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; <<
3334         struct vm_area_struct *vma = vmf->vma    2315         struct vm_area_struct *vma = vmf->vma;
3335         struct mm_struct *mm = vma->vm_mm;       2316         struct mm_struct *mm = vma->vm_mm;
3336         struct folio *old_folio = NULL;       !! 2317         struct page *old_page = vmf->page;
3337         struct folio *new_folio = NULL;       !! 2318         struct page *new_page = NULL;
3338         pte_t entry;                             2319         pte_t entry;
3339         int page_copied = 0;                     2320         int page_copied = 0;
3340         struct mmu_notifier_range range;      !! 2321         const unsigned long mmun_start = vmf->address & PAGE_MASK;
3341         vm_fault_t ret;                       !! 2322         const unsigned long mmun_end = mmun_start + PAGE_SIZE;
3342         bool pfn_is_zero;                     !! 2323         struct mem_cgroup *memcg;
3343                                               << 
3344         delayacct_wpcopy_start();             << 
3345                                               << 
3346         if (vmf->page)                        << 
3347                 old_folio = page_folio(vmf->page); <<
3348         ret = vmf_anon_prepare(vmf);          << 
3349         if (unlikely(ret))                    << 
3350                 goto out;                     << 
3351                                                  2324 
3352         pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte)); !! 2325         if (unlikely(anon_vma_prepare(vma)))
3353         new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero); <<
3354         if (!new_folio)                       << 
3355                 goto oom;                        2326                 goto oom;
3356                                                  2327 
3357         if (!pfn_is_zero) {                   !! 2328         if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
3358                 int err;                      !! 2329                 new_page = alloc_zeroed_user_highpage_movable(vma,
3359                                               !! 2330                                                               vmf->address);
3360                 err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf); !! 2331                 if (!new_page)
3361                 if (err) {                    !! 2332                         goto oom;
3362                         /*                    !! 2333         } else {
3363                          * COW failed, if the fault was solved by other, !! 2334                 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
3364                          * it's fine. If not, userspace would re-fault on !! 2335                                 vmf->address);
3365                          * the same address and we will handle the fault !! 2336                 if (!new_page)
3366                          * from the second attempt. !! 2337                         goto oom;
3367                          * The -EHWPOISON case will not be retried. !! 2338                 cow_user_page(new_page, old_page, vmf->address, vma);
3368                          */                   <<
3369                         folio_put(new_folio); <<
3370                         if (old_folio)        <<
3371                                 folio_put(old_folio); <<
3372                                               <<
3373                         delayacct_wpcopy_end(); <<
3374                         return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0; <<
3375                 }                             <<
3376                 kmsan_copy_page_meta(&new_folio->page, vmf->page); <<
3377         }                                        2339         }
3378                                                  2340 
3379         __folio_mark_uptodate(new_folio);     !! 2341         if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))
                                                   >> 2342                 goto oom_free_new;
                                                   >> 2343 
                                                   >> 2344         __SetPageUptodate(new_page);
3380                                                  2345 
3381         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, !! 2346         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3382                                 vmf->address & PAGE_MASK, <<
3383                                 (vmf->address & PAGE_MASK) + PAGE_SIZE); <<
3384         mmu_notifier_invalidate_range_start(&range); <<
3385                                                  2347 
3386         /*                                       2348         /*
3387          * Re-check the pte - we dropped the     2349          * Re-check the pte - we dropped the lock
3388          */                                      2350          */
3389         vmf->pte = pte_offset_map_lock(mm, vm    2351         vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3390         if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { !! 2352         if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
3391                 if (old_folio) {              !! 2353                 if (old_page) {
3392                         if (!folio_test_anon(old_folio)) { !! 2354                         if (!PageAnon(old_page)) {
3393                                 dec_mm_counter(mm, mm_counter_file(old_folio)); !! 2355                                 dec_mm_counter_fast(mm,
3394                                 inc_mm_counter(mm, MM_ANONPAGES); !! 2356                                                 mm_counter_file(old_page));
                                                   >> 2357                                 inc_mm_counter_fast(mm, MM_ANONPAGES);
3395                         }                        2358                         }
3396                 } else {                         2359                 } else {
3397                         ksm_might_unmap_zero_page(mm, vmf->orig_pte); !! 2360                         inc_mm_counter_fast(mm, MM_ANONPAGES);
3398                         inc_mm_counter(mm, MM_ANONPAGES); <<
3399                 }                                2361                 }
3400                 flush_cache_page(vma, vmf->ad    2362                 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3401                 entry = mk_pte(&new_folio->page, vma->vm_page_prot); !! 2363                 entry = mk_pte(new_page, vma->vm_page_prot);
3402                 entry = pte_sw_mkyoung(entry); !! 2364                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3403                 if (unlikely(unshare)) {      <<
3404                         if (pte_soft_dirty(vmf->orig_pte)) <<
3405                                 entry = pte_mksoft_dirty(entry); <<
3406                         if (pte_uffd_wp(vmf->orig_pte)) <<
3407                                 entry = pte_mkuffd_wp(entry); <<
3408                 } else {                      <<
3409                         entry = maybe_mkwrite(pte_mkdirty(entry), vma); <<
3410                 }                             << 
3411                                               << 
3412                 /*                               2365                 /*
3413                  * Clear the pte entry and fl    2366                  * Clear the pte entry and flush it first, before updating the
3414                  * pte with the new entry, to keep TLBs on different CPUs in !! 2367                  * pte with the new entry. This will avoid a race condition
3415                  * sync. This code used to set the new PTE then flush TLBs, but !! 2368                  * seen in the presence of one thread doing SMC and another
3416                  * that left a window where the new PTE could be loaded into !! 2369                  * thread doing COW.
3417                  * some TLBs while the old PTE remains in others. <<
3418                  */                              2370                  */
3419                 ptep_clear_flush(vma, vmf->address, vmf->pte); !! 2371                 ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
3420                 folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE); !! 2372                 page_add_new_anon_rmap(new_page, vma, vmf->address, false);
3421                 folio_add_lru_vma(new_folio, vma); !! 2373                 mem_cgroup_commit_charge(new_page, memcg, false, false);
3422                 BUG_ON(unshare && pte_write(entry)); !! 2374                 lru_cache_add_active_or_unevictable(new_page, vma);
3423                 set_pte_at(mm, vmf->address, vmf->pte, entry); !! 2375                 /*
3424                 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); !! 2376                  * We call the notify macro here because, when using secondary
3425                 if (old_folio) {              !! 2377                  * mmu page tables (such as kvm shadow page tables), we want the
                                                   >> 2378                  * new page to be mapped directly into the secondary page table.
                                                   >> 2379                  */
                                                   >> 2380                 set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
                                                   >> 2381                 update_mmu_cache(vma, vmf->address, vmf->pte);
                                                   >> 2382                 if (old_page) {
3426                         /*                       2383                         /*
3427                          * Only after switchi    2384                          * Only after switching the pte to the new page may
3428                          * we remove the mapc    2385                          * we remove the mapcount here. Otherwise another
3429                          * process may come a    2386                          * process may come and find the rmap count decremented
3430                          * before the pte is     2387                          * before the pte is switched to the new page, and
3431                          * "reuse" the old pa    2388                          * "reuse" the old page writing into it while our pte
3432                          * here still points     2389                          * here still points into it and can be read by other
3433                          * threads.              2390                          * threads.
3434                          *                       2391                          *
3435                          * The critical issue    2392                          * The critical issue is to order this
3436                          * folio_remove_rmap_pte() with the ptep_clear_flush !! 2393                          * page_remove_rmap with the ptp_clear_flush
3437                          * above. Those stores are ordered by (if nothing else,) !! 2394                          * Those stores are ordered by (if nothing else,)
3438                          * the barrier present in the atomic_add_negative    2395                          * the barrier present in the atomic_add_negative
3439                          * in folio_remove_rmap_pte(). !! 2396                          * in page_remove_rmap.
3440                          *                       2397                          *
3441                          * Then the TLB flush    2398                          * Then the TLB flush in ptep_clear_flush ensures that
3442                          * no process can acc    2399                          * no process can access the old page before the
3443                          * decremented mapcou    2400                          * decremented mapcount is visible. And the old page
3444                          * cannot be reused u    2401                          * cannot be reused until after the decremented
3445                          * mapcount is visibl    2402                          * mapcount is visible. So transitively, TLBs to
3446                          * old page will be f    2403                          * old page will be flushed before it can be reused.
3447                          */                      2404                          */
3448                         folio_remove_rmap_pte(old_folio, vmf->page, vma); !! 2405                         page_remove_rmap(old_page, false);
3449                 }                                2406                 }
3450                                                  2407 
3451                 /* Free the old page.. */        2408                 /* Free the old page.. */
3452                 new_folio = old_folio;        !! 2409                 new_page = old_page;
3453                 page_copied = 1;                 2410                 page_copied = 1;
3454                 pte_unmap_unlock(vmf->pte, vmf->ptl); !! 2411         } else {
3455         } else if (vmf->pte) {                !! 2412                 mem_cgroup_cancel_charge(new_page, memcg, false);
3456                 update_mmu_tlb(vma, vmf->address, vmf->pte); <<
3457                 pte_unmap_unlock(vmf->pte, vmf->ptl); <<
3458         }                                        2413         }
3459                                                  2414 
3460         mmu_notifier_invalidate_range_end(&range); !! 2415         if (new_page)
3461                                               !! 2416                 put_page(new_page);
3462         if (new_folio)                        << 
3463                 folio_put(new_folio);         << 
3464         if (old_folio) {                      << 
3465                 if (page_copied)              << 
3466                         free_swap_cache(old_folio); <<
3467                 folio_put(old_folio);         << 
3468         }                                     << 
3469                                                  2417 
3470         delayacct_wpcopy_end();               !! 2418         pte_unmap_unlock(vmf->pte, vmf->ptl);
3471         return 0;                             !! 2419         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
                                                   >> 2420         if (old_page) {
                                                   >> 2421                 /*
                                                   >> 2422                  * Don't let another task, with possibly unlocked vma,
                                                   >> 2423                  * keep the mlocked page.
                                                   >> 2424                  */
                                                   >> 2425                 if (page_copied && (vma->vm_flags & VM_LOCKED)) {
                                                   >> 2426                         lock_page(old_page);    /* LRU manipulation */
                                                   >> 2427                         if (PageMlocked(old_page))
                                                   >> 2428                                 munlock_vma_page(old_page);
                                                   >> 2429                         unlock_page(old_page);
                                                   >> 2430                 }
                                                   >> 2431                 put_page(old_page);
                                                   >> 2432         }
                                                   >> 2433         return page_copied ? VM_FAULT_WRITE : 0;
                                                   >> 2434 oom_free_new:
                                                   >> 2435         put_page(new_page);
3472 oom:                                             2436 oom:
3473         ret = VM_FAULT_OOM;                   !! 2437         if (old_page)
3474 out:                                          !! 2438                 put_page(old_page);
3475         if (old_folio)                        !! 2439         return VM_FAULT_OOM;
3476                 folio_put(old_folio);         << 
3477                                               << 
3478         delayacct_wpcopy_end();               << 
3479         return ret;                           << 
3480 }                                                2440 }
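
The effect of this path is directly observable from user space: after
fork(), private anonymous memory is shared read-only between parent and
child, and the first write lands here and gets a private copy. A runnable
demonstration (assumes a 4 KiB page for the mapping length):

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;
		strcpy(p, "parent");
		if (fork() == 0) {
			strcpy(p, "child");	/* write fault -> wp_page_copy() */
			_exit(0);
		}
		wait(NULL);
		puts(p);	/* prints "parent": the child wrote into a copy */
		return 0;
	}
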
3481                                                  2441 
3482 /**                                              2442 /**
3483  * finish_mkwrite_fault - finish page fault f    2443  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3484  *                        writeable once the     2444  *                        writeable once the page is prepared
3485  *                                               2445  *
3486  * @vmf: structure describing the fault          2446  * @vmf: structure describing the fault
3487  * @folio: the folio of vmf->page             << 
3488  *                                               2447  *
3489  * This function handles all that is needed t    2448  * This function handles all that is needed to finish a write page fault in a
3490  * shared mapping due to PTE being read-only     2449  * shared mapping due to PTE being read-only once the mapped page is prepared.
3491  * It handles locking of PTE and modifying it. !! 2450  * It handles locking of PTE and modifying it. The function returns
                                                   >> 2451  * VM_FAULT_WRITE on success, 0 when PTE got changed before we acquired PTE
                                                   >> 2452  * lock.
3492  *                                               2453  *
3493  * The function expects the page to be locked    2454  * The function expects the page to be locked or other protection against
3494  * concurrent faults / writeback (such as DAX    2455  * concurrent faults / writeback (such as DAX radix tree locks).
3495  *                                            << 
3496  * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before <<
3497  * we acquired PTE lock.                      << 
3498  */                                              2456  */
3499 static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio) !! 2457 int finish_mkwrite_fault(struct vm_fault *vmf)
3500 {                                                2458 {
3501         WARN_ON_ONCE(!(vmf->vma->vm_flags & V    2459         WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3502         vmf->pte = pte_offset_map_lock(vmf->v    2460         vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3503                                        &vmf->    2461                                        &vmf->ptl);
3504         if (!vmf->pte)                        << 
3505                 return VM_FAULT_NOPAGE;       << 
3506         /*                                       2462         /*
3507          * We might have raced with another p    2463          * We might have raced with another page fault while we released the
3508          * pte_offset_map_lock.                  2464          * pte_offset_map_lock.
3509          */                                      2465          */
3510         if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) { !! 2466         if (!pte_same(*vmf->pte, vmf->orig_pte)) {
3511                 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); <<
3512                 pte_unmap_unlock(vmf->pte, vm    2467                 pte_unmap_unlock(vmf->pte, vmf->ptl);
3513                 return VM_FAULT_NOPAGE;          2468                 return VM_FAULT_NOPAGE;
3514         }                                        2469         }
3515         wp_page_reuse(vmf, folio);            !! 2470         wp_page_reuse(vmf);
3516         return 0;                                2471         return 0;
3517 }                                                2472 }
3518                                                  2473 
3519 /*                                               2474 /*
3520  * Handle write page faults for VM_MIXEDMAP o    2475  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3521  * mapping                                       2476  * mapping
3522  */                                              2477  */
3523 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) !! 2478 static int wp_pfn_shared(struct vm_fault *vmf)
3524 {                                                2479 {
3525         struct vm_area_struct *vma = vmf->vma    2480         struct vm_area_struct *vma = vmf->vma;
3526                                                  2481 
3527         if (vma->vm_ops && vma->vm_ops->pfn_m    2482         if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3528                 vm_fault_t ret;               !! 2483                 int ret;
3529                                                  2484 
3530                 pte_unmap_unlock(vmf->pte, vm    2485                 pte_unmap_unlock(vmf->pte, vmf->ptl);
3531                 ret = vmf_can_call_fault(vmf); <<
3532                 if (ret)                      << 
3533                         return ret;           << 
3534                                               << 
3535                 vmf->flags |= FAULT_FLAG_MKWR    2486                 vmf->flags |= FAULT_FLAG_MKWRITE;
3536                 ret = vma->vm_ops->pfn_mkwrit    2487                 ret = vma->vm_ops->pfn_mkwrite(vmf);
3537                 if (ret & (VM_FAULT_ERROR | V    2488                 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3538                         return ret;              2489                         return ret;
3539                 return finish_mkwrite_fault(vmf, NULL); !! 2490                 return finish_mkwrite_fault(vmf);
3540         }                                        2491         }
3541         wp_page_reuse(vmf, NULL);             !! 2492         wp_page_reuse(vmf);
3542         return 0;                             !! 2493         return VM_FAULT_WRITE;
3543 }                                                2494 }
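
For reference, the handler side of this path is small. A hypothetical
minimal ->pfn_mkwrite for a VM_PFNMAP, VM_SHARED mapping; returning 0 lets
finish_mkwrite_fault() above make the existing PTE writable (DAX-style
handlers instead insert the PTE themselves and return VM_FAULT_NOPAGE):

	#include <linux/mm.h>

	static vm_fault_t my_pfn_mkwrite(struct vm_fault *vmf)
	{
		/* ...mark the backing store writable/dirty; may sleep... */
		return 0;	/* let the core set the write bit */
	}
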
3544                                                  2495 
3545 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio) !! 2496 static int wp_page_shared(struct vm_fault *vmf)
3546         __releases(vmf->ptl)                     2497         __releases(vmf->ptl)
3547 {                                                2498 {
3548         struct vm_area_struct *vma = vmf->vma    2499         struct vm_area_struct *vma = vmf->vma;
3549         vm_fault_t ret = 0;                   << 
3550                                                  2500 
3551         folio_get(folio);                     !! 2501         get_page(vmf->page);
3552                                                  2502 
3553         if (vma->vm_ops && vma->vm_ops->page_    2503         if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3554                 vm_fault_t tmp;               !! 2504                 int tmp;
3555                                                  2505 
3556                 pte_unmap_unlock(vmf->pte, vm    2506                 pte_unmap_unlock(vmf->pte, vmf->ptl);
3557                 tmp = vmf_can_call_fault(vmf); !! 2507                 tmp = do_page_mkwrite(vmf);
3558                 if (tmp) {                    << 
3559                         folio_put(folio);     << 
3560                         return tmp;           << 
3561                 }                             << 
3562                                               << 
3563                 tmp = do_page_mkwrite(vmf, folio); <<
3564                 if (unlikely(!tmp || (tmp &      2508                 if (unlikely(!tmp || (tmp &
3565                                       (VM_FAU    2509                                       (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3566                         folio_put(folio);     !! 2510                         put_page(vmf->page);
3567                         return tmp;              2511                         return tmp;
3568                 }                                2512                 }
3569                 tmp = finish_mkwrite_fault(vmf, folio); !! 2513                 tmp = finish_mkwrite_fault(vmf);
3570                 if (unlikely(tmp & (VM_FAULT_    2514                 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3571                         folio_unlock(folio);  !! 2515                         unlock_page(vmf->page);
3572                         folio_put(folio);     !! 2516                         put_page(vmf->page);
3573                         return tmp;              2517                         return tmp;
3574                 }                                2518                 }
3575         } else {                                 2519         } else {
3576                 wp_page_reuse(vmf, folio);    !! 2520                 wp_page_reuse(vmf);
3577                 folio_lock(folio);            !! 2521                 lock_page(vmf->page);
3578         }                                        2522         }
3579         ret |= fault_dirty_shared_page(vmf);  !! 2523         fault_dirty_shared_page(vma, vmf->page);
3580         folio_put(folio);                     !! 2524         put_page(vmf->page);
3581                                               << 
3582         return ret;                           << 
3583 }                                             << 
3584                                               << 
3585 static bool wp_can_reuse_anon_folio(struct folio *folio, <<
3586                                     struct vm_area_struct *vma) <<
3587 {                                             << 
3588         /*                                    << 
3589          * We could currently only reuse a subpage of a large folio if no <<
3590          * other subpages of the large folios are still mapped. However, <<
3591          * let's just consistently not reuse subpages even if we could <<
3592          * reuse in that scenario, and give back a large folio a bit <<
3593          * sooner.                            << 
3594          */                                   << 
3595         if (folio_test_large(folio))          << 
3596                 return false;                 << 
3597                                                  2525 
3598         /*                                    !! 2526         return VM_FAULT_WRITE;
3599          * We have to verify under folio lock: these early checks are <<
3600          * just an optimization to avoid locking the folio and freeing <<
3601          * the swapcache if there is little hope that we can reuse. <<
3602          *                                    <<
3603          * KSM doesn't necessarily raise the folio refcount. <<
3604          */                                   << 
3605         if (folio_test_ksm(folio) || folio_ref_count(folio) > 3) <<
3606                 return false;                 << 
3607         if (!folio_test_lru(folio))           << 
3608                 /*                            << 
3609                  * We cannot easily detect+handle references from <<
3610                  * remote LRU caches or references from (pinned) direct IO. <<
3611                  */                           << 
3612                 lru_add_drain();              << 
3613         if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio)) <<
3614                 return false;                 << 
3615         if (!folio_trylock(folio))            << 
3616                 return false;                 << 
3617         if (folio_test_swapcache(folio))      << 
3618                 folio_free_swap(folio);       << 
3619         if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) { <<
3620                 folio_unlock(folio);          << 
3621                 return false;                 << 
3622         }                                     << 
3623         /*                                    << 
3624          * Ok, we've got the only folio reference from our mapping <<
3625          * and the folio is locked, it's dark out, and we're wearing <<
3626          * sunglasses. Hit it.                << 
3627          */                                   << 
3628         folio_move_anon_rmap(folio, vma);     << 
3629         folio_unlock(folio);                  << 
3630         return true;                          << 
3631 }                                                2527 }
3632                                                  2528 
3633 /*                                               2529 /*
3634  * This routine handles present pages, when   !! 2530  * This routine handles present pages, when users try to write
3635  * * users try to write to a shared page (FAULT_FLAG_WRITE) !! 2531  * to a shared page. It is done by copying the page to a new address
3636  * * GUP wants to take a R/O pin on a possibly shared anonymous page !! 2532  * and decrementing the shared-page counter for the old page.
3637  *   (FAULT_FLAG_UNSHARE)                     <<
3638  *                                            <<
3639  * It is done by copying the page to a new address and decrementing the <<
3640  * shared-page counter for the old page.      << 
3641  *                                               2533  *
3642  * Note that this routine assumes that the pr    2534  * Note that this routine assumes that the protection checks have been
3643  * done by the caller (the low-level page fau    2535  * done by the caller (the low-level page fault routine in most cases).
3644  * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've !! 2536  * Thus we can safely just mark it writable once we've done any necessary
3645  * done any necessary COW.                    !! 2537  * COW.
3646  *                                               2538  *
3647  * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even !! 2539  * We also mark the page dirty at this point even though the page will
3648  * though the page will change only once the write actually happens. This !! 2540  * change only once the write actually happens. This avoids a few races,
3649  * avoids a few races, and potentially makes it more efficient. !! 2541  * and potentially makes it more efficient.
3650  *                                               2542  *
3651  * We enter with non-exclusive mmap_lock (to exclude vma changes, !! 2543  * We enter with non-exclusive mmap_sem (to exclude vma changes,
3652  * but allow concurrent faults), with pte both mapped and locked.    2544  * but allow concurrent faults), with pte both mapped and locked.
3653  * We return with mmap_lock still held, but pte unmapped and unlocked. !! 2545  * We return with mmap_sem still held, but pte unmapped and unlocked.
3654  */                                              2546  */
3655 static vm_fault_t do_wp_page(struct vm_fault *vmf) !! 2547 static int do_wp_page(struct vm_fault *vmf)
3656         __releases(vmf->ptl)                     2548         __releases(vmf->ptl)
3657 {                                                2549 {
3658         const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; <<
3659         struct vm_area_struct *vma = vmf->vma    2550         struct vm_area_struct *vma = vmf->vma;
3660         struct folio *folio = NULL;           << 
3661         pte_t pte;                            << 
3662                                               << 
3663         if (likely(!unshare)) {               << 
3664                 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) { <<
3665                         if (!userfaultfd_wp_async(vma)) { <<
3666                                 pte_unmap_unlock(vmf->pte, vmf->ptl); <<
3667                                 return handle_userfault(vmf, VM_UFFD_WP); <<
3668                         }                     <<
3669                                               <<
3670                         /*                    <<
3671                          * Nothing needed (cache flush, TLB invalidations, <<
3672                          * etc.) because we're only removing the uffd-wp bit, <<
3673                          * which is completely invisible to the user. <<
3674                          */                   <<
3675                         pte = pte_clear_uffd_wp(ptep_get(vmf->pte)); <<
3676                                               <<
3677                         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); <<
3678                         /*                    <<
3679                          * Update this to be prepared for following up CoW <<
3680                          * handling           <<
3681                          */                   <<
3682                         vmf->orig_pte = pte;  <<
3683                 }                             <<
3684                                               <<
3685                 /*                            <<
3686                  * Userfaultfd write-protect can defer flushes. Ensure the TLB <<
3687                  * is flushed in this case before copying. <<
3688                  */                           <<
3689                 if (unlikely(userfaultfd_wp(vmf->vma) && <<
3690                              mm_tlb_flush_pending(vmf->vma->vm_mm))) <<
3691                         flush_tlb_page(vmf->vma, vmf->address); <<
3692         }                                     << 
3693                                                  2551 
3694         vmf->page = vm_normal_page(vma, vmf->    2552         vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3695                                               !! 2553         if (!vmf->page) {
3696         if (vmf->page)                        << 
3697                 folio = page_folio(vmf->page); <<
3698                                               << 
3699         /*                                    << 
3700          * Shared mapping: we are guaranteed to have VM_WRITE and <<
3701          * FAULT_FLAG_WRITE set at this point. <<
3702          */                                   <<
3703         if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { <<
3704                 /*                               2554                 /*
3705                  * VM_MIXEDMAP !pfn_valid() c    2555                  * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3706                  * VM_PFNMAP VMA.                2556                  * VM_PFNMAP VMA.
3707                  *                               2557                  *
3708                  * We should not cow pages in    2558                  * We should not cow pages in a shared writeable mapping.
3709                  * Just mark the pages writab    2559                  * Just mark the pages writable and/or call ops->pfn_mkwrite.
3710                  */                              2560                  */
3711                 if (!vmf->page)               !! 2561                 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                                   >> 2562                                      (VM_WRITE|VM_SHARED))
3712                         return wp_pfn_shared(    2563                         return wp_pfn_shared(vmf);
3713                 return wp_page_shared(vmf, folio); !! 2564 
                                                   >> 2565                 pte_unmap_unlock(vmf->pte, vmf->ptl);
                                                   >> 2566                 return wp_page_copy(vmf);
3714         }                                        2567         }
3715                                                  2568 
3716         /*                                       2569         /*
3717          * Private mapping: create an exclusive anonymous page copy if reuse !! 2570          * Take out anonymous pages first, anonymous shared vmas are
3718          * is impossible. We might miss VM_WRITE for FOLL_FORCE handling. !! 2571          * not dirty accountable.
3719          *                                    <<
3720          * If we encounter a page that is marked exclusive, we must reuse <<
3721          * the page without further checks.   <<
3722          */                                      2572          */
3723         if (folio && folio_test_anon(folio) && !! 2573         if (PageAnon(vmf->page) && !PageKsm(vmf->page)) {
3724             (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) { !! 2574                 int total_mapcount;
3725                 if (!PageAnonExclusive(vmf->page)) !! 2575                 if (!trylock_page(vmf->page)) {
3726                         SetPageAnonExclusive(vmf->page); !! 2576                         get_page(vmf->page);
3727                 if (unlikely(unshare)) {      << 
3728                         pte_unmap_unlock(vmf-    2577                         pte_unmap_unlock(vmf->pte, vmf->ptl);
3729                         return 0;             !! 2578                         lock_page(vmf->page);
                                                   >> 2579                         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
                                                   >> 2580                                         vmf->address, &vmf->ptl);
                                                   >> 2581                         if (!pte_same(*vmf->pte, vmf->orig_pte)) {
                                                   >> 2582                                 unlock_page(vmf->page);
                                                   >> 2583                                 pte_unmap_unlock(vmf->pte, vmf->ptl);
                                                   >> 2584                                 put_page(vmf->page);
                                                   >> 2585                                 return 0;
                                                   >> 2586                         }
                                                   >> 2587                         put_page(vmf->page);
3730                 }                                2588                 }
3731                 wp_page_reuse(vmf, folio);    !! 2589                 if (reuse_swap_page(vmf->page, &total_mapcount)) {
3732                 return 0;                     !! 2590                         if (total_mapcount == 1) {
                                                   >> 2591                                 /*
                                                   >> 2592                                  * The page is all ours. Move it to
                                                   >> 2593                                  * our anon_vma so the rmap code will
                                                   >> 2594                                  * not search our parent or siblings.
                                                   >> 2595                                  * Protected against the rmap code by
                                                   >> 2596                                  * the page lock.
                                                   >> 2597                                  */
                                                   >> 2598                                 page_move_anon_rmap(vmf->page, vma);
                                                   >> 2599                         }
                                                   >> 2600                         unlock_page(vmf->page);
                                                   >> 2601                         wp_page_reuse(vmf);
                                                   >> 2602                         return VM_FAULT_WRITE;
                                                   >> 2603                 }
                                                   >> 2604                 unlock_page(vmf->page);
                                                   >> 2605         } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                                   >> 2606                                         (VM_WRITE|VM_SHARED))) {
                                                   >> 2607                 return wp_page_shared(vmf);
3733         }                                        2608         }
                                                   >> 2609 
3734         /*                                       2610         /*
3735          * Ok, we need to copy. Oh, well..       2611          * Ok, we need to copy. Oh, well..
3736          */                                      2612          */
3737         if (folio)                            !! 2613         get_page(vmf->page);
3738                 folio_get(folio);             << 
3739                                                  2614 
3740         pte_unmap_unlock(vmf->pte, vmf->ptl);    2615         pte_unmap_unlock(vmf->pte, vmf->ptl);
3741 #ifdef CONFIG_KSM                             << 
3742         if (folio && folio_test_ksm(folio))   << 
3743                 count_vm_event(COW_KSM);      << 
3744 #endif                                        << 
3745         return wp_page_copy(vmf);                2616         return wp_page_copy(vmf);
3746 }                                                2617 }
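
A user-space view of this routine: reading a page of a MAP_PRIVATE file
mapping installs a read-only PTE to the page-cache page, and the following
write takes the present-PTE path above and COWs it, leaving the file
untouched. Runnable sketch (demo.txt is a scratch file):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[8] = "on-disk";
		int fd = open("demo.txt", O_RDWR | O_CREAT | O_TRUNC, 0600);

		if (fd < 0 || write(fd, buf, sizeof(buf)) != sizeof(buf))
			return 1;
		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE, fd, 0);
		if (p == MAP_FAILED)
			return 1;
		volatile char c = p[0];	/* read fault: PTE present, read-only */
		(void)c;
		p[0] = 'X';		/* write fault -> do_wp_page() copies */
		pread(fd, buf, sizeof(buf), 0);
		printf("mapping=%s file=%s\n", p, buf);	/* file unchanged */
		return 0;
	}
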
3747                                                  2618 
3748 static void unmap_mapping_range_vma(struct vm    2619 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3749                 unsigned long start_addr, uns    2620                 unsigned long start_addr, unsigned long end_addr,
3750                 struct zap_details *details)     2621                 struct zap_details *details)
3751 {                                                2622 {
3752         zap_page_range_single(vma, start_addr    2623         zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3753 }                                                2624 }
3754                                                  2625 
3755 static inline void unmap_mapping_range_tree(struct rb_root_cached *root, !! 2626 static inline void unmap_mapping_range_tree(struct rb_root *root,
3756                                             pgoff_t first_index, <<
3757                                             pgoff_t last_index, <<
3758                                             s    2627                                             struct zap_details *details)
3759 {                                                2628 {
3760         struct vm_area_struct *vma;              2629         struct vm_area_struct *vma;
3761         pgoff_t vba, vea, zba, zea;              2630         pgoff_t vba, vea, zba, zea;
3762                                                  2631 
3763         vma_interval_tree_foreach(vma, root, first_index, last_index) { !! 2632         vma_interval_tree_foreach(vma, root,
                                                   >> 2633                         details->first_index, details->last_index) {
                                                   >> 2634 
3764                 vba = vma->vm_pgoff;             2635                 vba = vma->vm_pgoff;
3765                 vea = vba + vma_pages(vma) -     2636                 vea = vba + vma_pages(vma) - 1;
3766                 zba = max(first_index, vba);  !! 2637                 zba = details->first_index;
3767                 zea = min(last_index, vea);   !! 2638                 if (zba < vba)
                                                   >> 2639                         zba = vba;
                                                   >> 2640                 zea = details->last_index;
                                                   >> 2641                 if (zea > vea)
                                                   >> 2642                         zea = vea;
3768                                                  2643 
3769                 unmap_mapping_range_vma(vma,     2644                 unmap_mapping_range_vma(vma,
3770                         ((zba - vba) << PAGE_    2645                         ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3771                         ((zea - vba + 1) << P    2646                         ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3772                                 details);        2647                                 details);
3773         }                                        2648         }
3774 }                                                2649 }
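
The index clamping above is plain interval arithmetic. A stand-alone sketch
with made-up numbers (assumes PAGE_SHIFT == 12; names mirror the locals
above):

	#include <stdio.h>

	int main(void)
	{
		unsigned long vba = 10, vea = 19;  /* VMA covers file pages 10..19 */
		unsigned long first_index = 14, last_index = 100;  /* zap request */
		unsigned long zba = first_index > vba ? first_index : vba;  /* 14 */
		unsigned long zea = last_index < vea ? last_index : vea;    /* 19 */
		unsigned long vm_start = 0x700000000000UL;

		printf("unmap [%#lx, %#lx)\n",
		       ((zba - vba) << 12) + vm_start,
		       ((zea - vba + 1) << 12) + vm_start);
		return 0;
	}
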
3775                                                  2650 
3776 /**                                              2651 /**
3777  * unmap_mapping_folio() - Unmap single folio from processes. <<
3778  * @folio: The locked folio to be unmapped.   <<
3779  *                                            <<
3780  * Unmap this folio from any userspace process which still has it mmaped. <<
3781  * Typically, for efficiency, the range of nearby pages has already been <<
3782  * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once <<
3783  * truncation or invalidation holds the lock on a folio, it may find that <<
3784  * the page has been remapped again: and then uses unmap_mapping_folio() <<
3785  * to unmap it finally.                       << 
3786  */                                           << 
3787 void unmap_mapping_folio(struct folio *folio) << 
3788 {                                             << 
3789         struct address_space *mapping = folio << 
3790         struct zap_details details = { };     << 
3791         pgoff_t first_index;                  << 
3792         pgoff_t last_index;                   << 
3793                                               << 
3794         VM_BUG_ON(!folio_test_locked(folio)); << 
3795                                               << 
3796         first_index = folio->index;           << 
3797         last_index = folio_next_index(folio)  << 
3798                                               << 
3799         details.even_cows = false;            << 
3800         details.single_folio = folio;         << 
3801         details.zap_flags = ZAP_FLAG_DROP_MAR << 
3802                                               << 
3803         i_mmap_lock_read(mapping);            << 
3804         if (unlikely(!RB_EMPTY_ROOT(&mapping- << 
3805                 unmap_mapping_range_tree(&map << 
3806                                          last << 
3807         i_mmap_unlock_read(mapping);          << 
3808 }                                             << 
3809                                               << 
/**
 * unmap_mapping_pages() - Unmap pages from processes.
 * @mapping: The address space containing pages to be unmapped.
 * @start: Index of first page to be unmapped.
 * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
 * @even_cows: Whether to unmap even private COWed pages.
 *
 * Unmap the pages in this address space from any userspace process which
 * has them mmaped.  Generally, you want to remove COWed pages as well when
 * a file is being truncated, but not when invalidating pages from the page
 * cache.
 */
void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
                pgoff_t nr, bool even_cows)
{
        struct zap_details details = { };
        pgoff_t first_index = start;
        pgoff_t last_index = start + nr - 1;

        details.even_cows = even_cows;
        if (last_index < first_index)
                last_index = ULONG_MAX;

        i_mmap_lock_read(mapping);
        if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
                unmap_mapping_range_tree(&mapping->i_mmap, first_index,
                                         last_index, &details);
        i_mmap_unlock_read(mapping);
}
EXPORT_SYMBOL_GPL(unmap_mapping_pages);

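/*
 * Editor's note (illustrative, hypothetical demo_* name): with nr == 0,
 * start + nr - 1 wraps below start, which the check above turns into
 * ULONG_MAX, i.e. "unmap to end of file".  In isolation:
 */
static unsigned long demo_last_index(unsigned long start, unsigned long nr)
{
        unsigned long last = start + nr - 1;    /* may wrap for nr == 0 */

        if (last < start)                       /* wrapped: zap to EOF */
                last = ~0UL;                    /* ULONG_MAX */
        return last;
}
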
/**
 * unmap_mapping_range - unmap the portion of all mmaps in the specified
 * address_space corresponding to the specified byte range in the underlying
 * file.
 *
 * @mapping: the address space containing mmaps to be unmapped.
 * @holebegin: byte in first page to unmap, relative to the start of
 * the underlying file.  This will be rounded down to a PAGE_SIZE
 * boundary.  Note that this is different from truncate_pagecache(), which
 * must keep the partial page.  In contrast, we must get rid of
 * partial pages.
 * @holelen: size of prospective hole in bytes.  This will be rounded
 * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
 * end of the file.
 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
 * but 0 when invalidating pagecache, don't throw away private data.
 */
void unmap_mapping_range(struct address_space *mapping,
                loff_t const holebegin, loff_t const holelen, int even_cows)
{
        pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
        pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;

        /* Check for overflow. */
        if (sizeof(holelen) > sizeof(hlen)) {
                long long holeend =
                        (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
                if (holeend & ~(long long)ULONG_MAX)
                        hlen = ULONG_MAX - hba + 1;
        }

        unmap_mapping_pages(mapping, hba, hlen, even_cows);
}
EXPORT_SYMBOL(unmap_mapping_range);

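/*
 * Editor's note (stand-alone sketch with a local page size; not kernel
 * code): holebegin rounds *down* to a page boundary while holelen rounds
 * *up*, so partial pages at either end are fully unmapped.  With 4KiB
 * pages, holebegin = 5000 and holelen = 100 yield hba = 1, hlen = 1,
 * i.e. exactly page index 1.  (The overflow branch above additionally
 * clamps hlen on configs where loff_t is wider than pgoff_t.)
 */
#define DEMO_PAGE_SIZE  4096UL
#define DEMO_PAGE_SHIFT 12

static void demo_hole_to_pages(unsigned long long holebegin,
                               unsigned long long holelen,
                               unsigned long *hba, unsigned long *hlen)
{
        *hba  = holebegin >> DEMO_PAGE_SHIFT;                       /* round down */
        *hlen = (holelen + DEMO_PAGE_SIZE - 1) >> DEMO_PAGE_SHIFT;  /* round up */
}
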
/*
 * Restore a potential device exclusive pte to a working pte entry
 */
static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
{
        struct folio *folio = page_folio(vmf->page);
        struct vm_area_struct *vma = vmf->vma;
        struct mmu_notifier_range range;
        vm_fault_t ret;

        /*
         * We need a reference to lock the folio because we don't hold
         * the PTL so a racing thread can remove the device-exclusive
         * entry and unmap it. If the folio is free the entry must
         * have been removed already. If it happens to have already
         * been re-allocated after being freed all we do is lock and
         * unlock it.
         */
        if (!folio_try_get(folio))
                return 0;

        ret = folio_lock_or_retry(folio, vmf);
        if (ret) {
                folio_put(folio);
                return ret;
        }
        mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
                                vma->vm_mm, vmf->address & PAGE_MASK,
                                (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
        mmu_notifier_invalidate_range_start(&range);

        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
                                &vmf->ptl);
        if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
                restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);

        if (vmf->pte)
                pte_unmap_unlock(vmf->pte, vmf->ptl);
        folio_unlock(folio);
        folio_put(folio);

        mmu_notifier_invalidate_range_end(&range);
        return 0;
}

static inline bool should_try_to_free_swap(struct folio *folio,
                                           struct vm_area_struct *vma,
                                           unsigned int fault_flags)
{
        if (!folio_test_swapcache(folio))
                return false;
        if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
            folio_test_mlocked(folio))
                return true;
        /*
         * If we want to map a page that's in the swapcache writable, we
         * have to detect via the refcount if we're really the exclusive
         * user. Try freeing the swapcache to get rid of the swapcache
         * reference only in case it's likely that we'll be the exclusive user.
         */
        return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
                folio_ref_count(folio) == (1 + folio_nr_pages(folio));
}

static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
{
        vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
                                       vmf->address, &vmf->ptl);
        if (!vmf->pte)
                return 0;
        /*
         * Be careful so that we will only recover a special uffd-wp pte into a
         * none pte.  Otherwise it means the pte could have changed, so retry.
         *
         * This should also cover the case where e.g. the pte changed
         * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED.
         * So is_pte_marker() check is not enough to safely drop the pte.
         */
        if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
                pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        return 0;
}

static vm_fault_t do_pte_missing(struct vm_fault *vmf)
{
        if (vma_is_anonymous(vmf->vma))
                return do_anonymous_page(vmf);
        else
                return do_fault(vmf);
}

/*
 * This is actually a page-missing access, but with uffd-wp special pte
 * installed.  It means this pte was wr-protected before being unmapped.
 */
static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
{
        /*
         * Just in case there're leftover special ptes even after the region
         * got unregistered - we can simply clear them.
         */
        if (unlikely(!userfaultfd_wp(vmf->vma)))
                return pte_marker_clear(vmf);

        return do_pte_missing(vmf);
}

static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
{
        swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
        unsigned long marker = pte_marker_get(entry);

        /*
         * PTE markers should never be empty.  If anything weird happened,
         * the best thing to do is to kill the process along with its mm.
         */
        if (WARN_ON_ONCE(!marker))
                return VM_FAULT_SIGBUS;

        /* Higher priority than uffd-wp when data corrupted */
        if (marker & PTE_MARKER_POISONED)
                return VM_FAULT_HWPOISON;

        if (pte_marker_entry_uffd_wp(entry))
                return pte_marker_handle_uffd_wp(vmf);

        /* This is an unknown pte marker */
        return VM_FAULT_SIGBUS;
}

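/*
 * Editor's note (stand-alone sketch; the DEMO_* names and bit values are
 * hypothetical, not the kernel's PTE_MARKER_* definitions): the dispatch
 * above is plain bit testing on the marker word, with the poison bit
 * deliberately checked before the uffd-wp bit so that data corruption
 * outranks write-protect tracking.
 */
#define DEMO_MARKER_UFFD_WP     (1UL << 0)
#define DEMO_MARKER_POISONED    (1UL << 1)

static int demo_marker_dispatch(unsigned long marker)
{
        if (!marker)
                return -1;                      /* empty marker: kill */
        if (marker & DEMO_MARKER_POISONED)
                return 1;                       /* report memory failure */
        if (marker & DEMO_MARKER_UFFD_WP)
                return 2;                       /* resolve as missing fault */
        return -1;                              /* unknown marker: kill */
}
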
static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct folio *folio;
        swp_entry_t entry;

        folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
                                vmf->address, false);
        if (!folio)
                return NULL;

        entry = pte_to_swp_entry(vmf->orig_pte);
        if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
                                           GFP_KERNEL, entry)) {
                folio_put(folio);
                return NULL;
        }

        return folio;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
{
        struct swap_info_struct *si = swp_swap_info(entry);
        pgoff_t offset = swp_offset(entry);
        int i;

        /*
         * When allocating a large folio and doing swap_read_folio() -- the
         * skip-swapcache case, where the faulting pte has no swapcache --
         * we need to ensure all the PTEs in the batch have no swapcache
         * either; otherwise we might read from the swap device while the
         * content is still in the swapcache.
         */
        for (i = 0; i < max_nr; i++) {
                if ((si->swap_map[offset + i] & SWAP_HAS_CACHE))
                        return i;
        }

        return i;
}

/*
 * Check if the PTEs within a range are contiguous swap entries
 * and have consistent swapcache, zeromap.
 */
static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
{
        unsigned long addr;
        swp_entry_t entry;
        int idx;
        pte_t pte;

        addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
        idx = (vmf->address - addr) / PAGE_SIZE;
        pte = ptep_get(ptep);

        if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx)))
                return false;
        entry = pte_to_swp_entry(pte);
        if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages)
                return false;

        /*
         * swap_read_folio() can't handle the case where a large folio is
         * mixed across different backends, and these are likely corner
         * cases. Skip zswap for now; things might be added once zswap
         * supports large folios.
         */
        if (unlikely(swap_zeromap_batch(entry, nr_pages, NULL) != nr_pages))
                return false;
        if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages))
                return false;

        return true;
}

static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset,
                                                     unsigned long addr,
                                                     unsigned long orders)
{
        int order, nr;

        order = highest_order(orders);

        /*
         * To swap in a THP with nr pages, we require that its first swap_offset
         * is aligned with that number, as it was when the THP was swapped out.
         * This helps filter out most invalid entries.
         */
        while (orders) {
                nr = 1 << order;
                if ((addr >> PAGE_SHIFT) % nr == swp_offset % nr)
                        break;
                order = next_order(&orders, order);
        }

        return orders;
}

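/*
 * Editor's note (stand-alone sketch, hypothetical demo_* name): the filter
 * above keeps an order only if the faulting virtual page index and the swap
 * offset are congruent modulo the folio size in pages, since a THP was
 * naturally aligned on both sides when it was swapped out.  For example,
 * page index 1029 with swap offset 521 passes for nr = 4 (both are 1 mod 4)
 * but fails for nr = 8 (5 vs. 1 mod 8).
 */
static int demo_order_suitable(unsigned long page_index,
                               unsigned long swp_offset, int order)
{
        unsigned long nr = 1UL << order;        /* pages per folio at this order */

        return (page_index % nr) == (swp_offset % nr);
}
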
static struct folio *alloc_swap_folio(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        unsigned long orders;
        struct folio *folio;
        unsigned long addr;
        swp_entry_t entry;
        spinlock_t *ptl;
        pte_t *pte;
        gfp_t gfp;
        int order;

        /*
         * If uffd is active for the vma we need per-page fault fidelity to
         * maintain the uffd semantics.
         */
        if (unlikely(userfaultfd_armed(vma)))
                goto fallback;

        /*
         * A large swapped out folio could be partially or fully in zswap. We
         * lack handling for such cases, so fall back to swapping in an
         * order-0 folio.
         */
        if (!zswap_never_enabled())
                goto fallback;

        entry = pte_to_swp_entry(vmf->orig_pte);
        /*
         * Get a list of all the (large) orders below PMD_ORDER that are enabled
         * and suitable for swapping THP.
         */
        orders = thp_vma_allowable_orders(vma, vma->vm_flags,
                        TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
        orders = thp_vma_suitable_orders(vma, vmf->address, orders);
        orders = thp_swap_suitable_orders(swp_offset(entry),
                                          vmf->address, orders);

        if (!orders)
                goto fallback;

        pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
                                  vmf->address & PMD_MASK, &ptl);
        if (unlikely(!pte))
                goto fallback;

        /*
         * For do_swap_page, find the highest order where the aligned range is
         * completely swap entries with contiguous swap offsets.
         */
        order = highest_order(orders);
        while (orders) {
                addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
                if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order))
                        break;
                order = next_order(&orders, order);
        }

        pte_unmap_unlock(pte, ptl);

        /* Try allocating the highest of the remaining orders. */
        gfp = vma_thp_gfp_mask(vma);
        while (orders) {
                addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
                folio = vma_alloc_folio(gfp, order, vma, addr, true);
                if (folio) {
                        if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
                                                            gfp, entry))
                                return folio;
                        folio_put(folio);
                }
                order = next_order(&orders, order);
        }

fallback:
        return __alloc_swap_folio(vmf);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static struct folio *alloc_swap_folio(struct vm_fault *vmf)
{
        return __alloc_swap_folio(vmf);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static DECLARE_WAIT_QUEUE_HEAD(swapcache_wq);

4192 /*                                            << 
4193  * We enter with non-exclusive mmap_lock (to  << 
4194  * but allow concurrent faults), and pte mapp    2698  * but allow concurrent faults), and pte mapped but not yet locked.
4195  * We return with pte unmapped and unlocked.     2699  * We return with pte unmapped and unlocked.
4196  *                                               2700  *
4197  * We return with the mmap_lock locked or unl !! 2701  * We return with the mmap_sem locked or unlocked in the same cases
4198  * as does filemap_fault().                      2702  * as does filemap_fault().
4199  */                                              2703  */
4200 vm_fault_t do_swap_page(struct vm_fault *vmf) !! 2704 int do_swap_page(struct vm_fault *vmf)
4201 {                                                2705 {
4202         struct vm_area_struct *vma = vmf->vma    2706         struct vm_area_struct *vma = vmf->vma;
4203         struct folio *swapcache, *folio = NUL !! 2707         struct page *page, *swapcache;
4204         DECLARE_WAITQUEUE(wait, current);     !! 2708         struct mem_cgroup *memcg;
4205         struct page *page;                    << 
4206         struct swap_info_struct *si = NULL;   << 
4207         rmap_t rmap_flags = RMAP_NONE;        << 
4208         bool need_clear_cache = false;        << 
4209         bool exclusive = false;               << 
4210         swp_entry_t entry;                       2709         swp_entry_t entry;
4211         pte_t pte;                               2710         pte_t pte;
4212         vm_fault_t ret = 0;                   !! 2711         int locked;
4213         void *shadow = NULL;                  !! 2712         int exclusive = 0;
4214         int nr_pages;                         !! 2713         int ret = 0;
4215         unsigned long page_idx;               << 
4216         unsigned long address;                << 
4217         pte_t *ptep;                          << 
4218                                                  2714 
4219         if (!pte_unmap_same(vmf))             !! 2715         if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
4220                 goto out;                        2716                 goto out;
4221                                                  2717 
4222         entry = pte_to_swp_entry(vmf->orig_pt    2718         entry = pte_to_swp_entry(vmf->orig_pte);
4223         if (unlikely(non_swap_entry(entry)))     2719         if (unlikely(non_swap_entry(entry))) {
4224                 if (is_migration_entry(entry)    2720                 if (is_migration_entry(entry)) {
4225                         migration_entry_wait(    2721                         migration_entry_wait(vma->vm_mm, vmf->pmd,
4226                                                  2722                                              vmf->address);
4227                 } else if (is_device_exclusiv << 
4228                         vmf->page = pfn_swap_ << 
4229                         ret = remove_device_e << 
4230                 } else if (is_device_private_ << 
4231                         if (vmf->flags & FAUL << 
4232                                 /*            << 
4233                                  * migrate_to << 
4234                                  * under VMA  << 
4235                                  */           << 
4236                                 vma_end_read( << 
4237                                 ret = VM_FAUL << 
4238                                 goto out;     << 
4239                         }                     << 
4240                                               << 
4241                         vmf->page = pfn_swap_ << 
4242                         vmf->pte = pte_offset << 
4243                                         vmf-> << 
4244                         if (unlikely(!vmf->pt << 
4245                                      !pte_sam << 
4246                                               << 
4247                                 goto unlock;  << 
4248                                               << 
4249                         /*                    << 
4250                          * Get a page referen << 
4251                          * freed.             << 
4252                          */                   << 
4253                         get_page(vmf->page);  << 
4254                         pte_unmap_unlock(vmf- << 
4255                         ret = vmf->page->pgma << 
4256                         put_page(vmf->page);  << 
4257                 } else if (is_hwpoison_entry(    2723                 } else if (is_hwpoison_entry(entry)) {
4258                         ret = VM_FAULT_HWPOIS    2724                         ret = VM_FAULT_HWPOISON;
4259                 } else if (is_pte_marker_entr << 
4260                         ret = handle_pte_mark << 
4261                 } else {                         2725                 } else {
4262                         print_bad_pte(vma, vm    2726                         print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
4263                         ret = VM_FAULT_SIGBUS    2727                         ret = VM_FAULT_SIGBUS;
4264                 }                                2728                 }
4265                 goto out;                        2729                 goto out;
4266         }                                        2730         }
4267                                               !! 2731         delayacct_set_flag(DELAYACCT_PF_SWAPIN);
4268         /* Prevent swapoff from happening to  !! 2732         page = lookup_swap_cache(entry);
4269         si = get_swap_device(entry);          !! 2733         if (!page) {
4270         if (unlikely(!si))                    !! 2734                 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vma,
4271                 goto out;                     !! 2735                                         vmf->address);
4272                                               !! 2736                 if (!page) {
4273         folio = swap_cache_get_folio(entry, v << 
4274         if (folio)                            << 
4275                 page = folio_file_page(folio, << 
4276         swapcache = folio;                    << 
4277                                               << 
4278         if (!folio) {                         << 
4279                 if (data_race(si->flags & SWP << 
4280                     __swap_count(entry) == 1) << 
4281                         /* skip swapcache */  << 
4282                         folio = alloc_swap_fo << 
4283                         if (folio) {          << 
4284                                 __folio_set_l << 
4285                                 __folio_set_s << 
4286                                               << 
4287                                 nr_pages = fo << 
4288                                 if (folio_tes << 
4289                                         entry << 
4290                                 /*            << 
4291                                  * Prevent pa << 
4292                                  * the cache  << 
4293                                  * may finish << 
4294                                  * swapout re << 
4295                                  * undetectab << 
4296                                  * to entry r << 
4297                                  */           << 
4298                                 if (swapcache << 
4299                                         /*    << 
4300                                          * Re << 
4301                                          * re << 
4302                                          */   << 
4303                                         add_w << 
4304                                         sched << 
4305                                         remov << 
4306                                         goto  << 
4307                                 }             << 
4308                                 need_clear_ca << 
4309                                               << 
4310                                 mem_cgroup_sw << 
4311                                               << 
4312                                 shadow = get_ << 
4313                                 if (shadow)   << 
4314                                         worki << 
4315                                               << 
4316                                 folio_add_lru << 
4317                                               << 
4318                                 /* To provide << 
4319                                 folio->swap = << 
4320                                 swap_read_fol << 
4321                                 folio->privat << 
4322                         }                     << 
4323                 } else {                      << 
4324                         folio = swapin_readah << 
4325                                               << 
4326                         swapcache = folio;    << 
4327                 }                             << 
4328                                               << 
4329                 if (!folio) {                 << 
4330                         /*                       2737                         /*
4331                          * Back out if somebo    2738                          * Back out if somebody else faulted in this pte
4332                          * while we released     2739                          * while we released the pte lock.
4333                          */                      2740                          */
4334                         vmf->pte = pte_offset    2741                         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4335                                         vmf->    2742                                         vmf->address, &vmf->ptl);
4336                         if (likely(vmf->pte & !! 2743                         if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
4337                                    pte_same(p << 
4338                                 ret = VM_FAUL    2744                                 ret = VM_FAULT_OOM;
                                                   >> 2745                         delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
4339                         goto unlock;             2746                         goto unlock;
4340                 }                                2747                 }
4341                                                  2748 
4342                 /* Had to read the page from     2749                 /* Had to read the page from swap area: Major fault */
4343                 ret = VM_FAULT_MAJOR;            2750                 ret = VM_FAULT_MAJOR;
4344                 count_vm_event(PGMAJFAULT);      2751                 count_vm_event(PGMAJFAULT);
4345                 count_memcg_event_mm(vma->vm_    2752                 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
4346                 page = folio_file_page(folio, << 
4347         } else if (PageHWPoison(page)) {         2753         } else if (PageHWPoison(page)) {
4348                 /*                               2754                 /*
4349                  * hwpoisoned dirty swapcache    2755                  * hwpoisoned dirty swapcache pages are kept for killing
4350                  * owner processes (which may    2756                  * owner processes (which may be unknown at hwpoison time)
4351                  */                              2757                  */
4352                 ret = VM_FAULT_HWPOISON;         2758                 ret = VM_FAULT_HWPOISON;
                                                   >> 2759                 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
                                                   >> 2760                 swapcache = page;
4353                 goto out_release;                2761                 goto out_release;
4354         }                                        2762         }
4355                                                  2763 
4356         ret |= folio_lock_or_retry(folio, vmf !! 2764         swapcache = page;
4357         if (ret & VM_FAULT_RETRY)             !! 2765         locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
4358                 goto out_release;             << 
4359                                                  2766 
4360         if (swapcache) {                      !! 2767         delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
4361                 /*                            !! 2768         if (!locked) {
4362                  * Make sure folio_free_swap( !! 2769                 ret |= VM_FAULT_RETRY;
4363                  * swapcache from under us.   !! 2770                 goto out_release;
4364                  * below, are not enough to e !! 2771         }
4365                  * swapcache, we need to chec << 
4366                  * changed.                   << 
4367                  */                           << 
4368                 if (unlikely(!folio_test_swap << 
4369                              page_swap_entry( << 
4370                         goto out_page;        << 
4371                                                  2772 
4372                 /*                            !! 2773         /*
4373                  * KSM sometimes has to copy  !! 2774          * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
4374                  * page->index of !PageKSM()  !! 2775          * release the swapcache from under us.  The page pin, and pte_same
4375                  * anon VMA -- PageKSM() is l !! 2776          * test below, are not enough to exclude that.  Even if it is still
4376                  */                           !! 2777          * swapcache, we need to check that the page's swap has not changed.
4377                 folio = ksm_might_need_to_cop !! 2778          */
4378                 if (unlikely(!folio)) {       !! 2779         if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
4379                         ret = VM_FAULT_OOM;   !! 2780                 goto out_page;
4380                         folio = swapcache;    << 
4381                         goto out_page;        << 
4382                 } else if (unlikely(folio ==  << 
4383                         ret = VM_FAULT_HWPOIS << 
4384                         folio = swapcache;    << 
4385                         goto out_page;        << 
4386                 }                             << 
4387                 if (folio != swapcache)       << 
4388                         page = folio_page(fol << 
4389                                                  2781 
4390                 /*                            !! 2782         page = ksm_might_need_to_copy(page, vma, vmf->address);
4391                  * If we want to map a page t !! 2783         if (unlikely(!page)) {
4392                  * have to detect via the ref !! 2784                 ret = VM_FAULT_OOM;
4393                  * owner. Try removing the ex !! 2785                 page = swapcache;
4394                  * caches if required.        !! 2786                 goto out_page;
4395                  */                           << 
4396                 if ((vmf->flags & FAULT_FLAG_ << 
4397                     !folio_test_ksm(folio) && << 
4398                         lru_add_drain();      << 
4399         }                                        2787         }
4400                                                  2788 
4401         folio_throttle_swaprate(folio, GFP_KE !! 2789         if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
                                                   >> 2790                                 &memcg, false)) {
                                                   >> 2791                 ret = VM_FAULT_OOM;
                                                   >> 2792                 goto out_page;
                                                   >> 2793         }
4402                                                  2794 
4403         /*                                       2795         /*
4404          * Back out if somebody else already     2796          * Back out if somebody else already faulted in this pte.
4405          */                                      2797          */
4406         vmf->pte = pte_offset_map_lock(vma->v    2798         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4407                         &vmf->ptl);              2799                         &vmf->ptl);
4408         if (unlikely(!vmf->pte || !pte_same(p !! 2800         if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
4409                 goto out_nomap;                  2801                 goto out_nomap;
4410                                                  2802 
4411         if (unlikely(!folio_test_uptodate(fol !! 2803         if (unlikely(!PageUptodate(page))) {
4412                 ret = VM_FAULT_SIGBUS;           2804                 ret = VM_FAULT_SIGBUS;
4413                 goto out_nomap;                  2805                 goto out_nomap;
4414         }                                        2806         }
4415                                                  2807 
4416         /* allocated large folios for SWP_SYN << 
4417         if (folio_test_large(folio) && !folio << 
4418                 unsigned long nr = folio_nr_p << 
4419                 unsigned long folio_start = A << 
4420                 unsigned long idx = (vmf->add << 
4421                 pte_t *folio_ptep = vmf->pte  << 
4422                 pte_t folio_pte = ptep_get(fo << 
4423                                               << 
4424                 if (!pte_same(folio_pte, pte_ << 
4425                     swap_pte_batch(folio_ptep << 
4426                         goto out_nomap;       << 
4427                                               << 
4428                 page_idx = idx;               << 
4429                 address = folio_start;        << 
4430                 ptep = folio_ptep;            << 
4431                 goto check_folio;             << 
4432         }                                     << 
4433                                               << 
4434         nr_pages = 1;                         << 
4435         page_idx = 0;                         << 
4436         address = vmf->address;               << 
4437         ptep = vmf->pte;                      << 
4438         if (folio_test_large(folio) && folio_ << 
4439                 int nr = folio_nr_pages(folio << 
4440                 unsigned long idx = folio_pag << 
4441                 unsigned long folio_start = a << 
4442                 unsigned long folio_end = fol << 
4443                 pte_t *folio_ptep;            << 
4444                 pte_t folio_pte;              << 
4445                                               << 
4446                 if (unlikely(folio_start < ma << 
4447                         goto check_folio;     << 
4448                 if (unlikely(folio_end > pmd_ << 
4449                         goto check_folio;     << 
4450                                               << 
4451                 folio_ptep = vmf->pte - idx;  << 
4452                 folio_pte = ptep_get(folio_pt << 
4453                 if (!pte_same(folio_pte, pte_ << 
4454                     swap_pte_batch(folio_ptep << 
4455                         goto check_folio;     << 
4456                                               << 
4457                 page_idx = idx;               << 
4458                 address = folio_start;        << 
4459                 ptep = folio_ptep;            << 
4460                 nr_pages = nr;                << 
4461                 entry = folio->swap;          << 
4462                 page = &folio->page;          << 
4463         }                                     << 
4464                                               << 
4465 check_folio:                                  << 
4466         /*                                    << 
4467          * PG_anon_exclusive reuses PG_mapped << 
4468          * must never point at an anonymous p << 
4469          * PG_anon_exclusive. Sanity check th << 
4470          * no filesystem set PG_mappedtodisk  << 
4471          * check after taking the PT lock and << 
4472          * concurrently faulted in this page  << 
4473          */                                   << 
4474         BUG_ON(!folio_test_anon(folio) && fol << 
4475         BUG_ON(folio_test_anon(folio) && Page << 
4476                                               << 
4477         /*                                    << 
4478          * Check under PT lock (to protect ag << 
4479          * the swap entry concurrently) for c << 
4480          */                                   << 
4481         if (!folio_test_ksm(folio)) {         << 
4482                 exclusive = pte_swp_exclusive << 
4483                 if (folio != swapcache) {     << 
4484                         /*                    << 
4485                          * We have a fresh pa << 
4486                          * swapcache -> certa << 
4487                          */                   << 
4488                         exclusive = true;     << 
4489                 } else if (exclusive && folio << 
4490                           data_race(si->flags << 
4491                         /*                    << 
4492                          * This is tricky: no << 
4493                          * concurrent page mo << 
4494                          *                    << 
4495                          * So if we stumble o << 
4496                          * we must not set th << 
4497                          * map it writable wi << 
4498                          * while still under  << 
4499                          *                    << 
4500                          * For these problema << 
4501                          * exclusive marker:  << 
4502                          * writeback only if  << 
4503                          * there are no unexp << 
4504                          * unmapping succeede << 
4505                          * further GUP refere << 
4506                          * appear, so droppin << 
4507                          * it only R/O is fin << 
4508                          */                   << 
4509                         exclusive = false;    << 
4510                 }                             << 
4511         }                                     << 
4512                                               << 
4513         /*                                    << 
4514          * Some architectures may have to res << 
4515          * when reading from swap. This metad << 
4516          * so this must be called before swap << 
4517          */                                   << 
4518         arch_swap_restore(folio_swap(entry, f << 
4519                                               << 
4520         /*                                       2808         /*
4521          * Remove the swap entry and conditio !! 2809          * The page isn't present yet, go ahead with the fault.
4522          * We're already holding a reference  !! 2810          *
4523          * yet.                               !! 2811          * Be careful about the sequence of operations here.
                                                   >> 2812          * To get its accounting right, reuse_swap_page() must be called
                                                   >> 2813          * while the page is counted on swap but not yet in mapcount i.e.
                                                   >> 2814          * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
                                                   >> 2815          * must be called after the swap_free(), or it will never succeed.
4524          */                                      2816          */
4525         swap_free_nr(entry, nr_pages);        << 
4526         if (should_try_to_free_swap(folio, vm << 
4527                 folio_free_swap(folio);       << 
4528                                                  2817 
4529         add_mm_counter(vma->vm_mm, MM_ANONPAG !! 2818         inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
4530         add_mm_counter(vma->vm_mm, MM_SWAPENT !! 2819         dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
4531         pte = mk_pte(page, vma->vm_page_prot)    2820         pte = mk_pte(page, vma->vm_page_prot);
                                                   >> 2821         if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
                                                   >> 2822                 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
                                                   >> 2823                 vmf->flags &= ~FAULT_FLAG_WRITE;
                                                   >> 2824                 ret |= VM_FAULT_WRITE;
                                                   >> 2825                 exclusive = RMAP_EXCLUSIVE;
                                                   >> 2826         }
                                                   >> 2827         flush_icache_page(vma, page);
4532         if (pte_swp_soft_dirty(vmf->orig_pte)    2828         if (pte_swp_soft_dirty(vmf->orig_pte))
4533                 pte = pte_mksoft_dirty(pte);     2829                 pte = pte_mksoft_dirty(pte);
4534         if (pte_swp_uffd_wp(vmf->orig_pte))   !! 2830         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
4535                 pte = pte_mkuffd_wp(pte);     !! 2831         vmf->orig_pte = pte;
4536                                               !! 2832         if (page == swapcache) {
4537         /*                                    !! 2833                 do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
4538          * Same logic as in do_wp_page(); how !! 2834                 mem_cgroup_commit_charge(page, memcg, true, false);
4539          * certainly not shared either becaus !! 2835                 activate_page(page);
4540          * exposing them to the swapcache or  !! 2836         } else { /* ksm created a completely new copy */
4541          * exclusivity.                       !! 2837                 page_add_new_anon_rmap(page, vma, vmf->address, false);
4542          */                                   !! 2838                 mem_cgroup_commit_charge(page, memcg, false, false);
4543         if (!folio_test_ksm(folio) &&         !! 2839                 lru_cache_add_active_or_unevictable(page, vma);
4544             (exclusive || folio_ref_count(fol !! 2840         }
4545                 if ((vma->vm_flags & VM_WRITE !! 2841 
4546                     !pte_needs_soft_dirty_wp( !! 2842         swap_free(entry);
4547                         pte = pte_mkwrite(pte !! 2843         if (mem_cgroup_swap_full(page) ||
4548                         if (vmf->flags & FAUL !! 2844             (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
4549                                 pte = pte_mkd !! 2845                 try_to_free_swap(page);
4550                                 vmf->flags &= !! 2846         unlock_page(page);
4551                         }                     !! 2847         if (page != swapcache) {
4552                 }                             << 
4553                 rmap_flags |= RMAP_EXCLUSIVE; << 
4554         }                                     << 
4555         folio_ref_add(folio, nr_pages - 1);   << 
4556         flush_icache_pages(vma, page, nr_page << 
4557         vmf->orig_pte = pte_advance_pfn(pte,  << 
4558                                               << 
4559         /* ksm created a completely new copy  << 
4560         if (unlikely(folio != swapcache && sw << 
4561                 folio_add_new_anon_rmap(folio << 
4562                 folio_add_lru_vma(folio, vma) << 
4563         } else if (!folio_test_anon(folio)) { << 
4564                 /*                            << 
4565                  * We currently only expect s << 
4566                  * fully exclusive or fully s << 
4567                  * folios which are fully exc << 
4568                  * folios within swapcache he << 
4569                  */                           << 
4570                 VM_WARN_ON_ONCE(folio_test_la << 
4571                 VM_WARN_ON_FOLIO(!folio_test_ << 
4572                 folio_add_new_anon_rmap(folio << 
4573         } else {                              << 
4574                 folio_add_anon_rmap_ptes(foli << 
4575                                         rmap_ << 
4576         }                                     << 
4577                                               << 
4578         VM_BUG_ON(!folio_test_anon(folio) ||  << 
4579                         (pte_write(pte) && !P << 
4580         set_ptes(vma->vm_mm, address, ptep, p << 
4581         arch_do_swap_page_nr(vma->vm_mm, vma, << 
4582                         pte, pte, nr_pages);  << 
4583                                               << 
4584         folio_unlock(folio);                  << 
4585         if (folio != swapcache && swapcache)  << 
4586                 /*                               2848                 /*
4587                  * Hold the lock to avoid the    2849                  * Hold the lock to avoid the swap entry to be reused
4588                  * until we take the PT lock     2850                  * until we take the PT lock for the pte_same() check
4589                  * (to avoid false positives     2851                  * (to avoid false positives from pte_same). For
4590                  * further safety release the    2852                  * further safety release the lock after the swap_free
4591                  * so that the swap count won    2853                  * so that the swap count won't change under a
4592                  * parallel locked swapcache.    2854                  * parallel locked swapcache.
4593                  */                              2855                  */
4594                 folio_unlock(swapcache);      !! 2856                 unlock_page(swapcache);
4595                 folio_put(swapcache);         !! 2857                 put_page(swapcache);
4596         }                                        2858         }
4597                                                  2859 
4598         if (vmf->flags & FAULT_FLAG_WRITE) {     2860         if (vmf->flags & FAULT_FLAG_WRITE) {
4599                 ret |= do_wp_page(vmf);          2861                 ret |= do_wp_page(vmf);
4600                 if (ret & VM_FAULT_ERROR)        2862                 if (ret & VM_FAULT_ERROR)
4601                         ret &= VM_FAULT_ERROR    2863                         ret &= VM_FAULT_ERROR;
4602                 goto out;                        2864                 goto out;
4603         }                                        2865         }
4604                                                  2866 
4605         /* No need to invalidate - it was non    2867         /* No need to invalidate - it was non-present before */
4606         update_mmu_cache_range(vmf, vma, addr !! 2868         update_mmu_cache(vma, vmf->address, vmf->pte);
4607 unlock:                                          2869 unlock:
4608         if (vmf->pte)                         !! 2870         pte_unmap_unlock(vmf->pte, vmf->ptl);
4609                 pte_unmap_unlock(vmf->pte, vm << 
4610 out:                                             2871 out:
4611         /* Clear the swap cache pin for direc << 
4612         if (need_clear_cache) {               << 
4613                 swapcache_clear(si, entry, nr << 
4614                 if (waitqueue_active(&swapcac << 
4615                         wake_up(&swapcache_wq << 
4616         }                                     << 
4617         if (si)                               << 
4618                 put_swap_device(si);          << 
4619         return ret;                              2872         return ret;
4620 out_nomap:                                       2873 out_nomap:
4621         if (vmf->pte)                         !! 2874         mem_cgroup_cancel_charge(page, memcg, false);
4622                 pte_unmap_unlock(vmf->pte, vm !! 2875         pte_unmap_unlock(vmf->pte, vmf->ptl);
4623 out_page:                                        2876 out_page:
4624         folio_unlock(folio);                  !! 2877         unlock_page(page);
4625 out_release:                                     2878 out_release:
4626         folio_put(folio);                     !! 2879         put_page(page);
4627         if (folio != swapcache && swapcache)  !! 2880         if (page != swapcache) {
4628                 folio_unlock(swapcache);      !! 2881                 unlock_page(swapcache);
4629                 folio_put(swapcache);         !! 2882                 put_page(swapcache);
4630         }                                     << 
4631         if (need_clear_cache) {               << 
4632                 swapcache_clear(si, entry, nr << 
4633                 if (waitqueue_active(&swapcac << 
4634                         wake_up(&swapcache_wq << 
4635         }                                        2883         }
4636         if (si)                               << 
4637                 put_swap_device(si);          << 
4638         return ret;                              2884         return ret;
4639 }                                                2885 }
4640                                                  2886 
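[Editor's note] The error paths above (out_nomap, out_page, out_release) are the kernel's staged goto-unwind idiom: each label releases exactly the resources acquired before the corresponding failure, and the success path falls through the same unwind. A minimal userspace sketch of the same idiom, with purely illustrative names:

#include <stdio.h>
#include <stdlib.h>

static int demo(const char *path)
{
        int ret = -1;
        char *buf;
        FILE *f;

        f = fopen(path, "r");
        if (!f)
                goto out;
        buf = malloc(4096);
        if (!buf)
                goto out_file;

        if (fread(buf, 1, 4096, f) == 0)
                goto out_buf;           /* empty file: unwind everything */
        ret = 0;                        /* success: fall through the unwind */
out_buf:
        free(buf);
out_file:
        fclose(f);
out:
        return ret;
}

int main(void)
{
        return demo("/etc/hostname") ? EXIT_FAILURE : EXIT_SUCCESS;
}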
4641 static bool pte_range_none(pte_t *pte, int nr << 
4642 {                                             << 
4643         int i;                                << 
4644                                               << 
4645         for (i = 0; i < nr_pages; i++) {      << 
4646                 if (!pte_none(ptep_get_lockle << 
4647                         return false;         << 
4648         }                                     << 
4649                                               << 
4650         return true;                          << 
4651 }                                             << 
4652                                               << 
4653 static struct folio *alloc_anon_folio(struct  << 
4654 {                                             << 
4655         struct vm_area_struct *vma = vmf->vma << 
4656 #ifdef CONFIG_TRANSPARENT_HUGEPAGE            << 
4657         unsigned long orders;                 << 
4658         struct folio *folio;                  << 
4659         unsigned long addr;                   << 
4660         pte_t *pte;                           << 
4661         gfp_t gfp;                            << 
4662         int order;                            << 
4663                                               << 
4664         /*                                    << 
4665          * If uffd is active for the vma we n << 
4666          * maintain the uffd semantics.       << 
4667          */                                   << 
4668         if (unlikely(userfaultfd_armed(vma))) << 
4669                 goto fallback;                << 
4670                                               << 
4671         /*                                    << 
4672          * Get a list of all the (large) orde << 
4673          * for this vma. Then filter out the  << 
4674          * the faulting address and still be  << 
4675          */                                   << 
4676         orders = thp_vma_allowable_orders(vma << 
4677                         TVA_IN_PF | TVA_ENFOR << 
4678         orders = thp_vma_suitable_orders(vma, << 
4679                                               << 
4680         if (!orders)                          << 
4681                 goto fallback;                << 
4682                                               << 
4683         pte = pte_offset_map(vmf->pmd, vmf->a << 
4684         if (!pte)                             << 
4685                 return ERR_PTR(-EAGAIN);      << 
4686                                               << 
4687         /*                                    << 
4688          * Find the highest order where the a << 
4689          * pte_none(). Note that all remainin << 
4690          * pte_none().                        << 
4691          */                                   << 
4692         order = highest_order(orders);        << 
4693         while (orders) {                      << 
4694                 addr = ALIGN_DOWN(vmf->addres << 
4695                 if (pte_range_none(pte + pte_ << 
4696                         break;                << 
4697                 order = next_order(&orders, o << 
4698         }                                     << 
4699                                               << 
4700         pte_unmap(pte);                       << 
4701                                               << 
4702         if (!orders)                          << 
4703                 goto fallback;                << 
4704                                               << 
4705         /* Try allocating the highest of the  << 
4706         gfp = vma_thp_gfp_mask(vma);          << 
4707         while (orders) {                      << 
4708                 addr = ALIGN_DOWN(vmf->addres << 
4709                 folio = vma_alloc_folio(gfp,  << 
4710                 if (folio) {                  << 
4711                         if (mem_cgroup_charge << 
4712                                 count_mthp_st << 
4713                                 folio_put(fol << 
4714                                 goto next;    << 
4715                         }                     << 
4716                         folio_throttle_swapra << 
4717                         folio_zero_user(folio << 
4718                         return folio;         << 
4719                 }                             << 
4720 next:                                         << 
4721                 count_mthp_stat(order, MTHP_S << 
4722                 order = next_order(&orders, o << 
4723         }                                     << 
4724                                               << 
4725 fallback:                                     << 
4726 #endif                                        << 
4727         return folio_prealloc(vma->vm_mm, vma << 
4728 }                                             << 
4729                                               << 
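[Editor's note] alloc_anon_folio() above scans candidate orders from highest to lowest, aligning the faulting address down to each order and accepting the first order whose whole range is still pte_none(). A simplified userspace model of that scan, assuming 4 KiB pages; highest_order()/next_order() are emulated with plain bit operations and the PTE walk is stubbed out:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT      12

/* Stand-in for the pte_range_none() walk over nr_pages PTEs. */
static bool range_is_free(unsigned long addr, unsigned int nr_pages)
{
        return nr_pages <= 16;  /* pretend only 16 free slots remain */
}

int main(void)
{
        unsigned long orders = (1UL << 4) | (1UL << 2) | (1UL << 0);
        unsigned long fault_addr = 0x7f0000123456UL;

        while (orders) {
                /* highest_order(): index of the highest set bit */
                int order = 8 * (int)sizeof(long) - 1 - __builtin_clzl(orders);
                unsigned long size = 1UL << (order + PAGE_SHIFT);
                unsigned long addr = fault_addr & ~(size - 1); /* ALIGN_DOWN */

                if (range_is_free(addr, 1U << order)) {
                        printf("picked order %d at %#lx\n", order, addr);
                        break;
                }
                orders &= ~(1UL << order);      /* next_order() */
        }
        return 0;
}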
4730 /*                                               2887 /*
4731  * We enter with non-exclusive mmap_lock (to  !! 2888  * We enter with non-exclusive mmap_sem (to exclude vma changes,
4732  * but allow concurrent faults), and pte mapp    2889  * but allow concurrent faults), and pte mapped but not yet locked.
4733  * We return with mmap_lock still held, but p !! 2890  * We return with mmap_sem still held, but pte unmapped and unlocked.
4734  */                                              2891  */
4735 static vm_fault_t do_anonymous_page(struct vm !! 2892 static int do_anonymous_page(struct vm_fault *vmf)
4736 {                                                2893 {
4737         struct vm_area_struct *vma = vmf->vma    2894         struct vm_area_struct *vma = vmf->vma;
4738         unsigned long addr = vmf->address;    !! 2895         struct mem_cgroup *memcg;
4739         struct folio *folio;                  !! 2896         struct page *page;
4740         vm_fault_t ret = 0;                   !! 2897         int ret = 0;
4741         int nr_pages = 1;                     << 
4742         pte_t entry;                             2898         pte_t entry;
4743                                                  2899 
4744         /* File mapping without ->vm_ops ? */    2900         /* File mapping without ->vm_ops ? */
4745         if (vma->vm_flags & VM_SHARED)           2901         if (vma->vm_flags & VM_SHARED)
4746                 return VM_FAULT_SIGBUS;          2902                 return VM_FAULT_SIGBUS;
4747                                                  2903 
4748         /*                                       2904         /*
4749          * Use pte_alloc() instead of pte_all !! 2905          * Use pte_alloc() instead of pte_alloc_map().  We can't run
4750          * be distinguished from a transient  !! 2906          * pte_offset_map() on pmds where a huge pmd might be created
                                                   >> 2907          * from a different thread.
                                                   >> 2908          *
                                                   >> 2909          * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
                                                   >> 2910          * parallel threads are excluded by other means.
                                                   >> 2911          *
                                                   >> 2912          * Here we only have down_read(mmap_sem).
4751          */                                      2913          */
4752         if (pte_alloc(vma->vm_mm, vmf->pmd))  !! 2914         if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))
4753                 return VM_FAULT_OOM;             2915                 return VM_FAULT_OOM;
4754                                                  2916 
                                                   >> 2917         /* See the comment in pte_alloc_one_map() */
                                                   >> 2918         if (unlikely(pmd_trans_unstable(vmf->pmd)))
                                                   >> 2919                 return 0;
                                                   >> 2920 
4755         /* Use the zero-page for reads */        2921         /* Use the zero-page for reads */
4756         if (!(vmf->flags & FAULT_FLAG_WRITE)     2922         if (!(vmf->flags & FAULT_FLAG_WRITE) &&
4757                         !mm_forbids_zeropage(    2923                         !mm_forbids_zeropage(vma->vm_mm)) {
4758                 entry = pte_mkspecial(pfn_pte    2924                 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
4759                                                  2925                                                 vma->vm_page_prot));
4760                 vmf->pte = pte_offset_map_loc    2926                 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4761                                 vmf->address,    2927                                 vmf->address, &vmf->ptl);
4762                 if (!vmf->pte)                !! 2928                 if (!pte_none(*vmf->pte))
4763                         goto unlock;             2929                         goto unlock;
4764                 if (vmf_pte_changed(vmf)) {   << 
4765                         update_mmu_tlb(vma, v << 
4766                         goto unlock;          << 
4767                 }                             << 
4768                 ret = check_stable_address_sp    2930                 ret = check_stable_address_space(vma->vm_mm);
4769                 if (ret)                         2931                 if (ret)
4770                         goto unlock;             2932                         goto unlock;
4771                 /* Deliver the page fault to     2933                 /* Deliver the page fault to userland, check inside PT lock */
4772                 if (userfaultfd_missing(vma))    2934                 if (userfaultfd_missing(vma)) {
4773                         pte_unmap_unlock(vmf-    2935                         pte_unmap_unlock(vmf->pte, vmf->ptl);
4774                         return handle_userfau    2936                         return handle_userfault(vmf, VM_UFFD_MISSING);
4775                 }                                2937                 }
4776                 goto setpte;                     2938                 goto setpte;
4777         }                                        2939         }
4778                                                  2940 
4779         /* Allocate our own private page. */     2941         /* Allocate our own private page. */
4780         ret = vmf_anon_prepare(vmf);          !! 2942         if (unlikely(anon_vma_prepare(vma)))
4781         if (ret)                              !! 2943                 goto oom;
4782                 return ret;                   !! 2944         page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
4783         /* Returns NULL on OOM or ERR_PTR(-EA !! 2945         if (!page)
4784         folio = alloc_anon_folio(vmf);        << 
4785         if (IS_ERR(folio))                    << 
4786                 return 0;                     << 
4787         if (!folio)                           << 
4788                 goto oom;                        2946                 goto oom;
4789                                                  2947 
4790         nr_pages = folio_nr_pages(folio);     !! 2948         if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
4791         addr = ALIGN_DOWN(vmf->address, nr_pa !! 2949                 goto oom_free_page;
4792                                                  2950 
4793         /*                                       2951         /*
4794          * The memory barrier inside __folio_ !! 2952          * The memory barrier inside __SetPageUptodate makes sure that
4795          * preceding stores to the page conte !! 2953          * preceding stores to the page contents become visible before
4796          * the set_pte_at() write.               2954          * the set_pte_at() write.
4797          */                                      2955          */
4798         __folio_mark_uptodate(folio);         !! 2956         __SetPageUptodate(page);
4799                                                  2957 
4800         entry = mk_pte(&folio->page, vma->vm_ !! 2958         entry = mk_pte(page, vma->vm_page_prot);
4801         entry = pte_sw_mkyoung(entry);        << 
4802         if (vma->vm_flags & VM_WRITE)            2959         if (vma->vm_flags & VM_WRITE)
4803                 entry = pte_mkwrite(pte_mkdir !! 2960                 entry = pte_mkwrite(pte_mkdirty(entry));
4804                                                  2961 
4805         vmf->pte = pte_offset_map_lock(vma->v !! 2962         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4806         if (!vmf->pte)                        !! 2963                         &vmf->ptl);
4807                 goto release;                 !! 2964         if (!pte_none(*vmf->pte))
4808         if (nr_pages == 1 && vmf_pte_changed( << 
4809                 update_mmu_tlb(vma, addr, vmf << 
4810                 goto release;                 << 
4811         } else if (nr_pages > 1 && !pte_range << 
4812                 update_mmu_tlb_range(vma, add << 
4813                 goto release;                    2965                 goto release;
4814         }                                     << 
4815                                                  2966 
4816         ret = check_stable_address_space(vma-    2967         ret = check_stable_address_space(vma->vm_mm);
4817         if (ret)                                 2968         if (ret)
4818                 goto release;                    2969                 goto release;
4819                                                  2970 
4820         /* Deliver the page fault to userland    2971         /* Deliver the page fault to userland, check inside PT lock */
4821         if (userfaultfd_missing(vma)) {          2972         if (userfaultfd_missing(vma)) {
4822                 pte_unmap_unlock(vmf->pte, vm    2973                 pte_unmap_unlock(vmf->pte, vmf->ptl);
4823                 folio_put(folio);             !! 2974                 mem_cgroup_cancel_charge(page, memcg, false);
                                                   >> 2975                 put_page(page);
4824                 return handle_userfault(vmf,     2976                 return handle_userfault(vmf, VM_UFFD_MISSING);
4825         }                                        2977         }
4826                                                  2978 
4827         folio_ref_add(folio, nr_pages - 1);   !! 2979         inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
4828         add_mm_counter(vma->vm_mm, MM_ANONPAG !! 2980         page_add_new_anon_rmap(page, vma, vmf->address, false);
4829         count_mthp_stat(folio_order(folio), M !! 2981         mem_cgroup_commit_charge(page, memcg, false, false);
4830         folio_add_new_anon_rmap(folio, vma, a !! 2982         lru_cache_add_active_or_unevictable(page, vma);
4831         folio_add_lru_vma(folio, vma);        << 
4832 setpte:                                          2983 setpte:
4833         if (vmf_orig_pte_uffd_wp(vmf))        !! 2984         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
4834                 entry = pte_mkuffd_wp(entry); << 
4835         set_ptes(vma->vm_mm, addr, vmf->pte,  << 
4836                                                  2985 
4837         /* No need to invalidate - it was non    2986         /* No need to invalidate - it was non-present before */
4838         update_mmu_cache_range(vmf, vma, addr !! 2987         update_mmu_cache(vma, vmf->address, vmf->pte);
4839 unlock:                                          2988 unlock:
4840         if (vmf->pte)                         !! 2989         pte_unmap_unlock(vmf->pte, vmf->ptl);
4841                 pte_unmap_unlock(vmf->pte, vm << 
4842         return ret;                              2990         return ret;
4843 release:                                         2991 release:
4844         folio_put(folio);                     !! 2992         mem_cgroup_cancel_charge(page, memcg, false);
                                                   >> 2993         put_page(page);
4845         goto unlock;                             2994         goto unlock;
                                                   >> 2995 oom_free_page:
                                                   >> 2996         put_page(page);
4846 oom:                                             2997 oom:
4847         return VM_FAULT_OOM;                     2998         return VM_FAULT_OOM;
4848 }                                                2999 }
4849                                                  3000 
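[Editor's note] The two paths of do_anonymous_page() are observable from userspace: a read fault on untouched anonymous memory sees zeroes (served by the shared zero page), and only the first write fault allocates a private page. A small behavioral demo; it cannot show which physical page backs the mapping, only the semantics:

#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 1 << 20;   /* 1 MiB of demand-zero memory */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        assert(p != MAP_FAILED);
        assert(p[0] == 0 && p[len - 1] == 0);   /* read faults: zero-page */
        memset(p, 0xaa, len);                   /* write faults: private pages */
        printf("first byte after write: %#x\n", p[0] & 0xff);
        munmap(p, len);
        return 0;
}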
4850 /*                                               3001 /*
4851  * The mmap_lock must have been held on entry !! 3002  * The mmap_sem must have been held on entry, and may have been
4852  * released depending on flags and vma->vm_op    3003  * released depending on flags and vma->vm_ops->fault() return value.
4853  * See filemap_fault() and __lock_page_retry(    3004  * See filemap_fault() and __lock_page_retry().
4854  */                                              3005  */
4855 static vm_fault_t __do_fault(struct vm_fault  !! 3006 static int __do_fault(struct vm_fault *vmf)
4856 {                                                3007 {
4857         struct vm_area_struct *vma = vmf->vma    3008         struct vm_area_struct *vma = vmf->vma;
4858         struct folio *folio;                  !! 3009         int ret;
4859         vm_fault_t ret;                       << 
4860                                               << 
4861         /*                                    << 
4862          * Preallocate pte before we take pag << 
4863          * deadlocks for memcg reclaim which  << 
4864          *                              lock_ << 
4865          *                              SetPa << 
4866          *                              unloc << 
4867          * lock_page(B)                       << 
4868          *                              lock_ << 
4869          * pte_alloc_one                      << 
4870          *   shrink_folio_list                << 
4871          *     wait_on_page_writeback(A)      << 
4872          *                              SetPa << 
4873          *                              unloc << 
4874          *                              # flu << 
4875          */                                   << 
4876         if (pmd_none(*vmf->pmd) && !vmf->prea << 
4877                 vmf->prealloc_pte = pte_alloc << 
4878                 if (!vmf->prealloc_pte)       << 
4879                         return VM_FAULT_OOM;  << 
4880         }                                     << 
4881                                                  3010 
4882         ret = vma->vm_ops->fault(vmf);           3011         ret = vma->vm_ops->fault(vmf);
4883         if (unlikely(ret & (VM_FAULT_ERROR |     3012         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
4884                             VM_FAULT_DONE_COW    3013                             VM_FAULT_DONE_COW)))
4885                 return ret;                      3014                 return ret;
4886                                                  3015 
4887         folio = page_folio(vmf->page);        << 
4888         if (unlikely(PageHWPoison(vmf->page))    3016         if (unlikely(PageHWPoison(vmf->page))) {
4889                 vm_fault_t poisonret = VM_FAU !! 3017                 if (ret & VM_FAULT_LOCKED)
4890                 if (ret & VM_FAULT_LOCKED) {  !! 3018                         unlock_page(vmf->page);
4891                         if (page_mapped(vmf-> !! 3019                 put_page(vmf->page);
4892                                 unmap_mapping << 
4893                         /* Retry if a clean f << 
4894                         if (mapping_evict_fol << 
4895                                 poisonret = V << 
4896                         folio_unlock(folio);  << 
4897                 }                             << 
4898                 folio_put(folio);             << 
4899                 vmf->page = NULL;                3020                 vmf->page = NULL;
4900                 return poisonret;             !! 3021                 return VM_FAULT_HWPOISON;
4901         }                                        3022         }
4902                                                  3023 
4903         if (unlikely(!(ret & VM_FAULT_LOCKED)    3024         if (unlikely(!(ret & VM_FAULT_LOCKED)))
4904                 folio_lock(folio);            !! 3025                 lock_page(vmf->page);
4905         else                                     3026         else
4906                 VM_BUG_ON_PAGE(!folio_test_lo !! 3027                 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
4907                                                  3028 
4908         return ret;                              3029         return ret;
4909 }                                                3030 }
4910                                                  3031 
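[Editor's note] The preallocation comment in __do_fault() (left column) boils down to a lock-ordering discipline: allocate anything whose allocation may recurse into reclaim before taking the page lock, so the allocator can never end up waiting on a page this task already holds locked. A generic sketch of that discipline, not kernel code:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

static void *prealloc_then_lock(size_t sz)
{
        /* Step 1: allocate while holding no locks, so the allocator is
         * free to block, reclaim, or fail. */
        void *table = malloc(sz);

        if (!table)
                return NULL;
        /* Step 2: only now take the lock; the critical section itself
         * performs no allocation. */
        pthread_mutex_lock(&page_lock);
        /* ... publish/install table here ... */
        pthread_mutex_unlock(&page_lock);
        return table;
}

int main(void)
{
        free(prealloc_then_lock(4096));
        return 0;
}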
4911 #ifdef CONFIG_TRANSPARENT_HUGEPAGE            !! 3032 /*
                                                   >> 3033  * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
                                                   >> 3034  * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
                                                   >> 3035  * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
                                                   >> 3036  * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
                                                   >> 3037  */
                                                   >> 3038 static int pmd_devmap_trans_unstable(pmd_t *pmd)
                                                   >> 3039 {
                                                   >> 3040         return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
                                                   >> 3041 }
                                                   >> 3042 
                                                   >> 3043 static int pte_alloc_one_map(struct vm_fault *vmf)
                                                   >> 3044 {
                                                   >> 3045         struct vm_area_struct *vma = vmf->vma;
                                                   >> 3046 
                                                   >> 3047         if (!pmd_none(*vmf->pmd))
                                                   >> 3048                 goto map_pte;
                                                   >> 3049         if (vmf->prealloc_pte) {
                                                   >> 3050                 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
                                                   >> 3051                 if (unlikely(!pmd_none(*vmf->pmd))) {
                                                   >> 3052                         spin_unlock(vmf->ptl);
                                                   >> 3053                         goto map_pte;
                                                   >> 3054                 }
                                                   >> 3055 
                                                   >> 3056                 atomic_long_inc(&vma->vm_mm->nr_ptes);
                                                   >> 3057                 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
                                                   >> 3058                 spin_unlock(vmf->ptl);
                                                   >> 3059                 vmf->prealloc_pte = NULL;
                                                   >> 3060         } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) {
                                                   >> 3061                 return VM_FAULT_OOM;
                                                   >> 3062         }
                                                   >> 3063 map_pte:
                                                   >> 3064         /*
                                                   >> 3065          * If a huge pmd materialized under us just retry later.  Use
                                                   >> 3066          * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
                                                   >> 3067          * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
                                                   >> 3068          * under us and then back to pmd_none, as a result of MADV_DONTNEED
                                                   >> 3069          * running immediately after a huge pmd fault in a different thread of
                                                   >> 3070          * this mm, in turn leading to a misleading pmd_trans_huge() retval.
                                                   >> 3071          * All we have to ensure is that it is a regular pmd that we can walk
                                                   >> 3072          * with pte_offset_map() and we can do that through an atomic read in
                                                   >> 3073          * C, which is what pmd_trans_unstable() provides.
                                                   >> 3074          */
                                                   >> 3075         if (pmd_devmap_trans_unstable(vmf->pmd))
                                                   >> 3076                 return VM_FAULT_NOPAGE;
                                                   >> 3077 
                                                   >> 3078         /*
                                                   >> 3079          * At this point we know that our vmf->pmd points to a page of ptes
                                                   >> 3080          * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
                                                   >> 3081          * for the duration of the fault.  If a racing MADV_DONTNEED runs and
                                                   >> 3082          * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
                                                   >> 3083          * be valid and we will re-check to make sure the vmf->pte isn't
                                                   >> 3084          * pte_none() under vmf->ptl protection when we return to
                                                   >> 3085          * alloc_set_pte().
                                                   >> 3086          */
                                                   >> 3087         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
                                                   >> 3088                         &vmf->ptl);
                                                   >> 3089         return 0;
                                                   >> 3090 }
                                                   >> 3091 
                                                   >> 3092 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
                                                   >> 3093 
                                                   >> 3094 #define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
                                                   >> 3095 static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
                                                   >> 3096                 unsigned long haddr)
                                                   >> 3097 {
                                                   >> 3098         if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
                                                   >> 3099                         (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
                                                   >> 3100                 return false;
                                                   >> 3101         if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
                                                   >> 3102                 return false;
                                                   >> 3103         return true;
                                                   >> 3104 }
                                                   >> 3105 
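[Editor's note] The old transhuge_vma_suitable() above is a pure congruence check: a file range can only be PMD-mapped if the VMA start and the file offset agree modulo the huge-page size. A standalone demo of that arithmetic, assuming 4 KiB pages and 512-page (2 MiB) PMDs as on x86-64:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT              12
#define HPAGE_PMD_NR            512
#define HPAGE_CACHE_INDEX_MASK  (HPAGE_PMD_NR - 1)

static bool pmd_mappable(unsigned long vm_start, unsigned long vm_pgoff)
{
        return ((vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) ==
               (vm_pgoff & HPAGE_CACHE_INDEX_MASK);
}

int main(void)
{
        /* Aligned: both offsets are multiples of 512 pages. */
        printf("%d\n", pmd_mappable(0x200000, 0));      /* prints 1 */
        /* Misaligned by one page: must fall back to PTE mappings. */
        printf("%d\n", pmd_mappable(0x201000, 0));      /* prints 0 */
        return 0;
}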
4912 static void deposit_prealloc_pte(struct vm_fa    3106 static void deposit_prealloc_pte(struct vm_fault *vmf)
4913 {                                                3107 {
4914         struct vm_area_struct *vma = vmf->vma    3108         struct vm_area_struct *vma = vmf->vma;
4915                                                  3109 
4916         pgtable_trans_huge_deposit(vma->vm_mm    3110         pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4917         /*                                       3111         /*
4918          * We are going to consume the preall    3112          * We are going to consume the prealloc table,
4919          * count that as nr_ptes.                3113          * count that as nr_ptes.
4920          */                                      3114          */
4921         mm_inc_nr_ptes(vma->vm_mm);           !! 3115         atomic_long_inc(&vma->vm_mm->nr_ptes);
4922         vmf->prealloc_pte = NULL;                3116         vmf->prealloc_pte = NULL;
4923 }                                                3117 }
4924                                                  3118 
4925 vm_fault_t do_set_pmd(struct vm_fault *vmf, s !! 3119 static int do_set_pmd(struct vm_fault *vmf, struct page *page)
4926 {                                                3120 {
4927         struct folio *folio = page_folio(page << 
4928         struct vm_area_struct *vma = vmf->vma    3121         struct vm_area_struct *vma = vmf->vma;
4929         bool write = vmf->flags & FAULT_FLAG_    3122         bool write = vmf->flags & FAULT_FLAG_WRITE;
4930         unsigned long haddr = vmf->address &     3123         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
4931         pmd_t entry;                             3124         pmd_t entry;
4932         vm_fault_t ret = VM_FAULT_FALLBACK;   !! 3125         int i, ret;
4933                                                  3126 
4934         /*                                    !! 3127         if (!transhuge_vma_suitable(vma, haddr))
4935          * It is too late to allocate a small !! 3128                 return VM_FAULT_FALLBACK;
4936          * folio in the pagecache: especially << 
4937          * PMD mappings, but PTE-mapped THP a << 
4938          * PMD mappings if THPs are disabled. << 
4939          */                                   << 
4940         if (thp_disabled_by_hw() || vma_thp_d << 
4941                 return ret;                   << 
4942                                               << 
4943         if (!thp_vma_suitable_order(vma, hadd << 
4944                 return ret;                   << 
4945                                                  3129 
4946         if (folio_order(folio) != HPAGE_PMD_O !! 3130         ret = VM_FAULT_FALLBACK;
4947                 return ret;                   !! 3131         page = compound_head(page);
4948         page = &folio->page;                  << 
4949                                                  3132 
4950         /*                                       3133         /*
4951          * Just backoff if any subpage of a T !! 3134          * Archs like ppc64 need additional space to store information
4952          * the corrupted page may mapped by P << 
4953          * check.  This kind of THP just can  << 
4954          * the corrupted subpage should trigg << 
4955          */                                   << 
4956         if (unlikely(folio_test_has_hwpoisone << 
4957                 return ret;                   << 
4958                                               << 
4959         /*                                    << 
4960          * Archs like ppc64 need additional s << 
4961          * related to pte entry. Use the prea    3135          * related to pte entry. Use the preallocated table for that.
4962          */                                      3136          */
4963         if (arch_needs_pgtable_deposit() && !    3137         if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
4964                 vmf->prealloc_pte = pte_alloc !! 3138                 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address);
4965                 if (!vmf->prealloc_pte)          3139                 if (!vmf->prealloc_pte)
4966                         return VM_FAULT_OOM;     3140                         return VM_FAULT_OOM;
                                                   >> 3141                 smp_wmb(); /* See comment in __pte_alloc() */
4967         }                                        3142         }
4968                                                  3143 
4969         vmf->ptl = pmd_lock(vma->vm_mm, vmf->    3144         vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4970         if (unlikely(!pmd_none(*vmf->pmd)))      3145         if (unlikely(!pmd_none(*vmf->pmd)))
4971                 goto out;                        3146                 goto out;
4972                                                  3147 
4973         flush_icache_pages(vma, page, HPAGE_P !! 3148         for (i = 0; i < HPAGE_PMD_NR; i++)
                                                   >> 3149                 flush_icache_page(vma, page + i);
4974                                                  3150 
4975         entry = mk_huge_pmd(page, vma->vm_pag    3151         entry = mk_huge_pmd(page, vma->vm_page_prot);
4976         if (write)                               3152         if (write)
4977                 entry = maybe_pmd_mkwrite(pmd    3153                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
4978                                                  3154 
4979         add_mm_counter(vma->vm_mm, mm_counter !! 3155         add_mm_counter(vma->vm_mm, MM_FILEPAGES, HPAGE_PMD_NR);
4980         folio_add_file_rmap_pmd(folio, page,  !! 3156         page_add_file_rmap(page, true);
4981                                               << 
4982         /*                                       3157         /*
4983          * deposit and withdraw with pmd lock    3158          * deposit and withdraw with pmd lock held
4984          */                                      3159          */
4985         if (arch_needs_pgtable_deposit())        3160         if (arch_needs_pgtable_deposit())
4986                 deposit_prealloc_pte(vmf);       3161                 deposit_prealloc_pte(vmf);
4987                                                  3162 
4988         set_pmd_at(vma->vm_mm, haddr, vmf->pm    3163         set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
4989                                                  3164 
4990         update_mmu_cache_pmd(vma, haddr, vmf-    3165         update_mmu_cache_pmd(vma, haddr, vmf->pmd);
4991                                                  3166 
4992         /* fault is handled */                   3167         /* fault is handled */
4993         ret = 0;                                 3168         ret = 0;
4994         count_vm_event(THP_FILE_MAPPED);         3169         count_vm_event(THP_FILE_MAPPED);
4995 out:                                             3170 out:
4996         spin_unlock(vmf->ptl);                   3171         spin_unlock(vmf->ptl);
4997         return ret;                              3172         return ret;
4998 }                                                3173 }
4999 #else                                            3174 #else
5000 vm_fault_t do_set_pmd(struct vm_fault *vmf, s !! 3175 static int do_set_pmd(struct vm_fault *vmf, struct page *page)
5001 {                                                3176 {
5002         return VM_FAULT_FALLBACK;             !! 3177         BUILD_BUG();
                                                   >> 3178         return 0;
5003 }                                                3179 }
5004 #endif                                           3180 #endif
5005                                                  3181 
5006 /**                                              3182 /**
5007  * set_pte_range - Set a range of PTEs to poi !! 3183  * alloc_set_pte - setup new PTE entry for given page and add reverse page
5008  * @vmf: Fault description.                   !! 3184  * mapping. If needed, the function allocates a page table or uses a pre-allocated one.
5009  * @folio: The folio that contains @page.     !! 3185  *
5010  * @page: The first page to create a PTE for. !! 3186  * @vmf: fault environment
5011  * @nr: The number of PTEs to create.         !! 3187  * @memcg: memcg to charge page (only for private mappings)
5012  * @addr: The first address to create a PTE f !! 3188  * @page: page to map
                                                   >> 3189  *
                                                   >> 3190  * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on
                                                   >> 3191  * return.
                                                   >> 3192  *
                                                   >> 3193  * Target users are the page handler itself and implementations of
                                                   >> 3194  * vm_ops->map_pages.
5013  */                                              3195  */
5014 void set_pte_range(struct vm_fault *vmf, stru !! 3196 int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
5015                 struct page *page, unsigned i !! 3197                 struct page *page)
5016 {                                                3198 {
5017         struct vm_area_struct *vma = vmf->vma    3199         struct vm_area_struct *vma = vmf->vma;
5018         bool write = vmf->flags & FAULT_FLAG_    3200         bool write = vmf->flags & FAULT_FLAG_WRITE;
5019         bool prefault = !in_range(vmf->addres << 
5020         pte_t entry;                             3201         pte_t entry;
                                                   >> 3202         int ret;
5021                                                  3203 
5022         flush_icache_pages(vma, page, nr);    !! 3204         if (pmd_none(*vmf->pmd) && PageTransCompound(page) &&
5023         entry = mk_pte(page, vma->vm_page_pro !! 3205                         IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
                                                   >> 3206                 /* THP on COW? */
                                                   >> 3207                 VM_BUG_ON_PAGE(memcg, page);
5024                                                  3208 
5025         if (prefault && arch_wants_old_prefau !! 3209                 ret = do_set_pmd(vmf, page);
5026                 entry = pte_mkold(entry);     !! 3210                 if (ret != VM_FAULT_FALLBACK)
5027         else                                  !! 3211                         return ret;
5028                 entry = pte_sw_mkyoung(entry) !! 3212         }
                                                   >> 3213 
                                                   >> 3214         if (!vmf->pte) {
                                                   >> 3215                 ret = pte_alloc_one_map(vmf);
                                                   >> 3216                 if (ret)
                                                   >> 3217                         return ret;
                                                   >> 3218         }
5029                                                  3219 
                                                   >> 3220         /* Re-check under ptl */
                                                   >> 3221         if (unlikely(!pte_none(*vmf->pte)))
                                                   >> 3222                 return VM_FAULT_NOPAGE;
                                                   >> 3223 
                                                   >> 3224         flush_icache_page(vma, page);
                                                   >> 3225         entry = mk_pte(page, vma->vm_page_prot);
5030         if (write)                               3226         if (write)
5031                 entry = maybe_mkwrite(pte_mkd    3227                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
5032         if (unlikely(vmf_orig_pte_uffd_wp(vmf << 
5033                 entry = pte_mkuffd_wp(entry); << 
5034         /* copy-on-write page */                 3228         /* copy-on-write page */
5035         if (write && !(vma->vm_flags & VM_SHA    3229         if (write && !(vma->vm_flags & VM_SHARED)) {
5036                 VM_BUG_ON_FOLIO(nr != 1, foli !! 3230                 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
5037                 folio_add_new_anon_rmap(folio !! 3231                 page_add_new_anon_rmap(page, vma, vmf->address, false);
5038                 folio_add_lru_vma(folio, vma) !! 3232                 mem_cgroup_commit_charge(page, memcg, false, false);
                                                   >> 3233                 lru_cache_add_active_or_unevictable(page, vma);
5039         } else {                                 3234         } else {
5040                 folio_add_file_rmap_ptes(foli !! 3235                 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
                                                   >> 3236                 page_add_file_rmap(page, false);
5041         }                                        3237         }
5042         set_ptes(vma->vm_mm, addr, vmf->pte,  !! 3238         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
5043                                                  3239 
5044         /* no need to invalidate: a not-prese    3240         /* no need to invalidate: a not-present page won't be cached */
5045         update_mmu_cache_range(vmf, vma, addr !! 3241         update_mmu_cache(vma, vmf->address, vmf->pte);
5046 }                                             << 
5047                                                  3242 
5048 static bool vmf_pte_changed(struct vm_fault * !! 3243         return 0;
5049 {                                             << 
5050         if (vmf->flags & FAULT_FLAG_ORIG_PTE_ << 
5051                 return !pte_same(ptep_get(vmf << 
5052                                               << 
5053         return !pte_none(ptep_get(vmf->pte)); << 
5054 }                                                3244 }
5055                                                  3245 
                                                   >> 3246 
5056 /**                                              3247 /**
5057  * finish_fault - finish page fault once we h    3248  * finish_fault - finish page fault once we have prepared the page to fault
5058  *                                               3249  *
5059  * @vmf: structure describing the fault          3250  * @vmf: structure describing the fault
5060  *                                               3251  *
5061  * This function handles all that is needed t    3252  * This function handles all that is needed to finish a page fault once the
5062  * page to fault in is prepared. It handles l    3253  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
5063  * given page, adds reverse page mapping, han    3254  * given page, adds reverse page mapping, handles memcg charges and LRU
5064  * addition.                                  !! 3255  * addition. The function returns 0 on success, VM_FAULT_ code in case of
                                                   >> 3256  * error.
5065  *                                               3257  *
5066  * The function expects the page to be locked    3258  * The function expects the page to be locked and on success it consumes a
5067  * reference of a page being mapped (for the     3259  * reference of a page being mapped (for the PTE which maps it).
5068  *                                            << 
5069  * Return: %0 on success, %VM_FAULT_ code in  << 
5070  */                                              3260  */
5071 vm_fault_t finish_fault(struct vm_fault *vmf) !! 3261 int finish_fault(struct vm_fault *vmf)
5072 {                                                3262 {
5073         struct vm_area_struct *vma = vmf->vma << 
5074         struct page *page;                       3263         struct page *page;
5075         struct folio *folio;                  !! 3264         int ret = 0;
5076         vm_fault_t ret;                       << 
5077         bool is_cow = (vmf->flags & FAULT_FLA << 
5078                       !(vma->vm_flags & VM_SH << 
5079         int type, nr_pages;                   << 
5080         unsigned long addr = vmf->address;    << 
5081                                                  3265 
5082         /* Did we COW the page? */               3266         /* Did we COW the page? */
5083         if (is_cow)                           !! 3267         if ((vmf->flags & FAULT_FLAG_WRITE) &&
                                                   >> 3268             !(vmf->vma->vm_flags & VM_SHARED))
5084                 page = vmf->cow_page;            3269                 page = vmf->cow_page;
5085         else                                     3270         else
5086                 page = vmf->page;                3271                 page = vmf->page;
5087                                                  3272 
5088         /*                                       3273         /*
5089          * check even for read faults because    3274          * check even for read faults because we might have lost our CoWed
5090          * page                                  3275          * page
5091          */                                      3276          */
5092         if (!(vma->vm_flags & VM_SHARED)) {   !! 3277         if (!(vmf->vma->vm_flags & VM_SHARED))
5093                 ret = check_stable_address_sp !! 3278                 ret = check_stable_address_space(vmf->vma->vm_mm);
5094                 if (ret)                      !! 3279         if (!ret)
5095                         return ret;           !! 3280                 ret = alloc_set_pte(vmf, vmf->memcg, page);
5096         }                                     !! 3281         if (vmf->pte)
5097                                               !! 3282                 pte_unmap_unlock(vmf->pte, vmf->ptl);
5098         if (pmd_none(*vmf->pmd)) {            << 
5099                 if (PageTransCompound(page))  << 
5100                         ret = do_set_pmd(vmf, << 
5101                         if (ret != VM_FAULT_F << 
5102                                 return ret;   << 
5103                 }                             << 
5104                                               << 
5105                 if (vmf->prealloc_pte)        << 
5106                         pmd_install(vma->vm_m << 
5107                 else if (unlikely(pte_alloc(v << 
5108                         return VM_FAULT_OOM;  << 
5109         }                                     << 
5110                                               << 
5111         folio = page_folio(page);             << 
5112         nr_pages = folio_nr_pages(folio);     << 
5113                                               << 
5114         /*                                    << 
5115          * Using per-page fault to maintain t << 
5116          * approach also applies to non-anony << 
5117          * inflating the RSS of the process.  << 
5118          */                                   << 
5119         if (!vma_is_anon_shmem(vma) || unlike << 
5120                 nr_pages = 1;                 << 
5121         } else if (nr_pages > 1) {            << 
5122                 pgoff_t idx = folio_page_idx( << 
5123                 /* The page offset of vmf->ad << 
5124                 pgoff_t vma_off = vmf->pgoff  << 
5125                 /* The index of the entry in  << 
5126                 pgoff_t pte_off = pte_index(v << 
5127                                               << 
5128                 /*                            << 
5129                  * Fallback to per-page fault << 
5130                  * cache beyond the VMA limit << 
5131                  */                           << 
5132                 if (unlikely(vma_off < idx || << 
5133                             vma_off + (nr_pag << 
5134                             pte_off < idx ||  << 
5135                             pte_off + (nr_pag << 
5136                         nr_pages = 1;         << 
5137                 } else {                      << 
5138                         /* Now we can set map << 
5139                         addr = vmf->address - << 
5140                         page = &folio->page;  << 
5141                 }                             << 
5142         }                                     << 
5143                                               << 
5144         vmf->pte = pte_offset_map_lock(vma->v << 
5145                                        addr,  << 
5146         if (!vmf->pte)                        << 
5147                 return VM_FAULT_NOPAGE;       << 
5148                                               << 
5149         /* Re-check under ptl */              << 
5150         if (nr_pages == 1 && unlikely(vmf_pte << 
5151                 update_mmu_tlb(vma, addr, vmf << 
5152                 ret = VM_FAULT_NOPAGE;        << 
5153                 goto unlock;                  << 
5154         } else if (nr_pages > 1 && !pte_range << 
5155                 update_mmu_tlb_range(vma, add << 
5156                 ret = VM_FAULT_NOPAGE;        << 
5157                 goto unlock;                  << 
5158         }                                     << 
5159                                               << 
5160         folio_ref_add(folio, nr_pages - 1);   << 
5161         set_pte_range(vmf, folio, page, nr_pa << 
5162         type = is_cow ? MM_ANONPAGES : mm_cou << 
5163         add_mm_counter(vma->vm_mm, type, nr_p << 
5164         ret = 0;                              << 
5165                                               << 
5166 unlock:                                       << 
5167         pte_unmap_unlock(vmf->pte, vmf->ptl); << 
5168         return ret;                              3283         return ret;
5169 }                                                3284 }
5170                                                  3285 
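[Editor's note] The batching decision in finish_fault() above amounts to two interval checks: map a multi-page folio in one go only if every page of it fits inside the VMA and inside the current page table; otherwise fall back to a single PTE. The sketch below only loosely mirrors the (partially truncated) kernel predicate; the helper name and constants are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define PTRS_PER_PTE    512UL

/* idx: faulting page's index within the folio; vma_off: page offset of
 * the fault within the VMA; pte_off: PTE slot of the fault address. */
static bool can_batch(unsigned long idx, unsigned long nr_pages,
                      unsigned long vma_off, unsigned long vma_pages,
                      unsigned long pte_off)
{
        if (vma_off < idx || vma_off + (nr_pages - idx) > vma_pages)
                return false;   /* folio pokes out of the VMA */
        if (pte_off < idx || pte_off + (nr_pages - idx) > PTRS_PER_PTE)
                return false;   /* folio crosses a page-table boundary */
        return true;
}

int main(void)
{
        /* 16-page folio, fault on its page 3, comfortably inside both. */
        printf("%d\n", can_batch(3, 16, 100, 4096, 40));        /* 1 */
        /* Same folio, but its tail would run past the last PTE slot. */
        printf("%d\n", can_batch(3, 16, 100, 4096, 510));       /* 0 */
        return 0;
}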
5171 static unsigned long fault_around_pages __rea !! 3286 static unsigned long fault_around_bytes __read_mostly =
5172         65536 >> PAGE_SHIFT;                  !! 3287         rounddown_pow_of_two(65536);
5173                                                  3288 
5174 #ifdef CONFIG_DEBUG_FS                           3289 #ifdef CONFIG_DEBUG_FS
5175 static int fault_around_bytes_get(void *data,    3290 static int fault_around_bytes_get(void *data, u64 *val)
5176 {                                                3291 {
5177         *val = fault_around_pages << PAGE_SHI !! 3292         *val = fault_around_bytes;
5178         return 0;                                3293         return 0;
5179 }                                                3294 }
5180                                                  3295 
5181 /*                                               3296 /*
5182  * fault_around_bytes must be rounded down to !! 3297  * fault_around_pages() and fault_around_mask() expect fault_around_bytes
5183  * what do_fault_around() expects to see.     !! 3298  * rounded down to the nearest page order. It's what do_fault_around() expects to
                                                   >> 3299  * see.
5184  */                                              3300  */
5185 static int fault_around_bytes_set(void *data,    3301 static int fault_around_bytes_set(void *data, u64 val)
5186 {                                                3302 {
5187         if (val / PAGE_SIZE > PTRS_PER_PTE)      3303         if (val / PAGE_SIZE > PTRS_PER_PTE)
5188                 return -EINVAL;                  3304                 return -EINVAL;
5189                                               !! 3305         if (val > PAGE_SIZE)
5190         /*                                    !! 3306                 fault_around_bytes = rounddown_pow_of_two(val);
5191          * The minimum value is 1 page, howev !! 3307         else
5192          * at all. See should_fault_around(). !! 3308                 fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
5193          */                                   << 
5194         val = max(val, PAGE_SIZE);            << 
5195         fault_around_pages = rounddown_pow_of << 
5196                                               << 
5197         return 0;                                3309         return 0;
5198 }                                                3310 }
5199 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_f    3311 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
5200                 fault_around_bytes_get, fault    3312                 fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
5201                                                  3313 
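[Editor's note] The arithmetic in fault_around_bytes_set() is easy to check in isolation: the value is clamped to at least one page, rounded down to a power of two, and stored in pages. rounddown_pow_of_two() below is a userspace stand-in for the kernel macro of the same name; it is undefined for 0, which is exactly why the setter clamps first:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PTRS_PER_PTE    512UL

static unsigned long rounddown_pow_of_two(unsigned long n)
{
        return 1UL << (8 * sizeof(long) - 1 - __builtin_clzl(n));
}

static long set_fault_around_bytes(unsigned long val, unsigned long *pages)
{
        if (val / PAGE_SIZE > PTRS_PER_PTE)
                return -1;              /* -EINVAL in the kernel */
        if (val < PAGE_SIZE)
                val = PAGE_SIZE;        /* max(val, PAGE_SIZE) */
        *pages = rounddown_pow_of_two(val) >> PAGE_SHIFT;
        return 0;
}

int main(void)
{
        unsigned long pages;

        if (!set_fault_around_bytes(100000, &pages))
                printf("%lu pages\n", pages);   /* 100000 B -> 16 pages */
        return 0;
}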
5202 static int __init fault_around_debugfs(void)     3314 static int __init fault_around_debugfs(void)
5203 {                                                3315 {
5204         debugfs_create_file_unsafe("fault_aro !! 3316         void *ret;
5205                                    &fault_aro !! 3317 
                                                   >> 3318         ret = debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
                                                   >> 3319                         &fault_around_bytes_fops);
                                                   >> 3320         if (!ret)
                                                   >> 3321                 pr_warn("Failed to create fault_around_bytes in debugfs");
5206         return 0;                                3322         return 0;
5207 }                                                3323 }
5208 late_initcall(fault_around_debugfs);             3324 late_initcall(fault_around_debugfs);
5209 #endif                                           3325 #endif
5210                                                  3326 
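
The get/set pair above is the whole debugfs interface: a read reports fault_around_pages scaled back up to bytes, and a write clamps the value to at least one page and rounds it down to a power of two. A minimal userspace sketch of that round-trip, assuming debugfs is mounted at /sys/kernel/debug and a 4 KiB page size (run as root):

/*
 * Hedged userspace sketch (not from the kernel tree): exercises the
 * fault_around_bytes knob implemented above.
 */
#include <stdio.h>

int main(void)
{
	const char *knob = "/sys/kernel/debug/fault_around_bytes";
	unsigned long long val = 0;
	FILE *f = fopen(knob, "w");

	if (!f) { perror(knob); return 1; }
	fprintf(f, "40000");		/* not a power of two */
	fclose(f);

	f = fopen(knob, "r");
	if (!f) { perror(knob); return 1; }
	if (fscanf(f, "%llu", &val) == 1)
		/* Expect 32768: 40000 rounded down to a power of two. */
		printf("fault_around_bytes = %llu\n", val);
	fclose(f);
	return 0;
}

Writing 40000 and reading back 32768 confirms the rounddown_pow_of_two() behaviour without digging through the source.
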
5211 /*                                               3327 /*
5212  * do_fault_around() tries to map few pages a    3328  * do_fault_around() tries to map few pages around the fault address. The hope
5213  * is that the pages will be needed soon and     3329  * is that the pages will be needed soon and this will lower the number of
5214  * faults to handle.                             3330  * faults to handle.
5215  *                                               3331  *
5216  * It uses vm_ops->map_pages() to map the pag    3332  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
5217  * not ready to be mapped: not up-to-date, lo    3333  * not ready to be mapped: not up-to-date, locked, etc.
5218  *                                               3334  *
5219  * This function doesn't cross VMA or page table boundaries, in order to call !! 3335  * This function is called with the page table lock taken. In the split ptlock
5220  * map_pages() and acquire a PTE lock only once. !! 3336  * case the page table lock only protects only those entries which belong to
                                                   >> 3337  * the page table corresponding to the fault address.
                                                   >> 3338  *
                                                   >> 3339  * This function doesn't cross the VMA boundaries, in order to call map_pages()
                                                   >> 3340  * only once.
5221  *                                               3341  *
5222  * fault_around_pages defines how many pages we'll try to map. !! 3342  * fault_around_pages() defines how many pages we'll try to map.
5223  * do_fault_around() expects it to be set to a power of two less than or equal !! 3343  * do_fault_around() expects it to return a power of two less than or equal to
5224  * to PTRS_PER_PTE.                            !! 3344  * PTRS_PER_PTE.
5225  *                                             !! 3345  *
5226  * The virtual address of the area that we map is naturally aligned to !! 3346  * The virtual address of the area that we map is naturally aligned to the
5227  * fault_around_pages * PAGE_SIZE rounded down to the machine page size !! 3347  * fault_around_pages() value (and therefore to page order).  This way it's
5228  * (and therefore to page order).  This way it's easier to guarantee !! 3348  * easier to guarantee that we don't cross page table boundaries.
5229  * that we don't cross page table boundaries. !! 3349  */
5230  */                                            !! 3350 static int do_fault_around(struct vm_fault *vmf)
5231 static vm_fault_t do_fault_around(struct vm_fault *vmf) !! 3351 {
5232 {                                              !! 3352         unsigned long address = vmf->address, nr_pages, mask;
5233         pgoff_t nr_pages = READ_ONCE(fault_around_pages); !! 3353         pgoff_t start_pgoff = vmf->pgoff;
5234         pgoff_t pte_off = pte_index(vmf->address); !! 3354         pgoff_t end_pgoff;
5235         /* The page offset of vmf->address within the VMA. */ !! 3355         int off, ret = 0;
5236         pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; !! 3356 
5237         pgoff_t from_pte, to_pte;             !! 3357         nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
5238         vm_fault_t ret;                       !! 3358         mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
5239                                               !! 3359 
5240         /* The PTE offset of the start address, clamped to the VMA. */ !! 3360         vmf->address = max(address & mask, vmf->vma->vm_start);
5241         from_pte = max(ALIGN_DOWN(pte_off, nr_pages), !! 3361         off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
5242                        pte_off - min(pte_off, vma_off)); !! 3362         start_pgoff -= off;
5243                                               !! 3363 
5244         /* The PTE offset of the end address, clamped to the VMA and PTE. */ !! 3364         /*
5245         to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE, !! 3365          *  end_pgoff is either end of page table or end of vma
5246                       pte_off + vma_pages(vmf->vma) - vma_off) - 1; !! 3366          *  or fault_around_pages() from start_pgoff, depending what is nearest.
                                                   >> 3367          */
                                                   >> 3368         end_pgoff = start_pgoff -
                                                   >> 3369                 ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
                                                   >> 3370                 PTRS_PER_PTE - 1;
                                                   >> 3371         end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
                                                   >> 3372                         start_pgoff + nr_pages - 1);
5247                                                  3373 
5248         if (pmd_none(*vmf->pmd)) {               3374         if (pmd_none(*vmf->pmd)) {
5249                 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); !! 3375                 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm,
                                                   >> 3376                                                   vmf->address);
5250                 if (!vmf->prealloc_pte)          3377                 if (!vmf->prealloc_pte)
5251                         return VM_FAULT_OOM;  !! 3378                         goto out;
                                                   >> 3379                 smp_wmb(); /* See comment in __pte_alloc() */
5252         }                                        3380         }
5253                                                  3381 
5254         rcu_read_lock();                      !! 3382         vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
5255         ret = vmf->vma->vm_ops->map_pages(vmf, << 
5256                         vmf->pgoff + from_pte - pte_off, << 
5257                         vmf->pgoff + to_pte - pte_off); << 
5258         rcu_read_unlock();                    << 
5259                                               << 
5260         return ret;                           << 
5261 }                                             << 
5262                                                  3383 
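
The from_pte/to_pte arithmetic above is the subtle part: the window is aligned down to the fault-around size and then clamped so it never leaves the VMA or the current page table. The same clamping as a standalone sketch, with hypothetical values (512-entry page tables, a 16-page window, a 10-page VMA):

/*
 * Hedged userspace model of do_fault_around()'s window clamping;
 * every value below is made up for illustration.
 */
#include <stdio.h>

#define PTRS_PER_PTE	512UL
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define MAX(a, b)	((a) > (b) ? (a) : (b))
#define MIN3(a, b, c)	MIN(MIN(a, b), (c))

int main(void)
{
	unsigned long nr_pages = 16;	/* fault_around_pages */
	unsigned long pte_off  = 100;	/* pte_index(vmf->address) */
	unsigned long vma_off  = 3;	/* fault is page 3 of the VMA */
	unsigned long vma_pages = 10;	/* VMA is only 10 pages long */

	/* Start: aligned window start, clamped to the VMA start. */
	unsigned long from_pte = MAX(ALIGN_DOWN(pte_off, nr_pages),
				     pte_off - MIN(pte_off, vma_off));

	/* End: window end, clamped to the page table and the VMA end. */
	unsigned long to_pte = MIN3(from_pte + nr_pages, PTRS_PER_PTE,
				    pte_off + vma_pages - vma_off) - 1;

	printf("map PTEs [%lu, %lu]\n", from_pte, to_pte);	/* [97, 106] */
	return 0;
}
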
5263 /* Return true if we should do read fault-around, false otherwise */ !! 3384         /* Huge page is mapped? Page fault is solved */
5264 static inline bool should_fault_around(struct vm_fault *vmf) !! 3385         if (pmd_trans_huge(*vmf->pmd)) {
5265 {                                             !! 3386                 ret = VM_FAULT_NOPAGE;
5266         /* No ->map_pages?  No way to fault around... */ !! 3387                 goto out;
5267         if (!vmf->vma->vm_ops->map_pages)     !! 3388         }
5268                 return false;                 << 
5269                                                  3389 
5270         if (uffd_disable_fault_around(vmf->vma)) !! 3390         /* ->map_pages() haven't done anything useful. Cold page cache? */
5271                 return false;                 !! 3391         if (!vmf->pte)
                                                   >> 3392                 goto out;
5272                                                  3393 
5273         /* A single page implies no faulting 'around' at all. */ !! 3394         /* check if the page fault is solved */
5274         return fault_around_pages > 1;        !! 3395         vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
                                                   >> 3396         if (!pte_none(*vmf->pte))
                                                   >> 3397                 ret = VM_FAULT_NOPAGE;
                                                   >> 3398         pte_unmap_unlock(vmf->pte, vmf->ptl);
                                                   >> 3399 out:
                                                   >> 3400         vmf->address = address;
                                                   >> 3401         vmf->pte = NULL;
                                                   >> 3402         return ret;
5275 }                                                3403 }
5276                                                  3404 
5277 static vm_fault_t do_read_fault(struct vm_fault *vmf) !! 3405 static int do_read_fault(struct vm_fault *vmf)
5278 {                                                3406 {
5279         vm_fault_t ret = 0;                   !! 3407         struct vm_area_struct *vma = vmf->vma;
5280         struct folio *folio;                  !! 3408         int ret = 0;
5281                                                  3409 
5282         /*                                       3410         /*
5283          * Let's call ->map_pages() first and    3411          * Let's call ->map_pages() first and use ->fault() as fallback
5284          * if page by the offset is not ready    3412          * if page by the offset is not ready to be mapped (cold cache or
5285          * something).                           3413          * something).
5286          */                                      3414          */
5287         if (should_fault_around(vmf)) {       !! 3415         if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
5288                 ret = do_fault_around(vmf);      3416                 ret = do_fault_around(vmf);
5289                 if (ret)                         3417                 if (ret)
5290                         return ret;              3418                         return ret;
5291         }                                        3419         }
5292                                                  3420 
5293         ret = vmf_can_call_fault(vmf);        << 
5294         if (ret)                              << 
5295                 return ret;                   << 
5296                                               << 
5297         ret = __do_fault(vmf);                   3421         ret = __do_fault(vmf);
5298         if (unlikely(ret & (VM_FAULT_ERROR |     3422         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5299                 return ret;                      3423                 return ret;
5300                                                  3424 
5301         ret |= finish_fault(vmf);                3425         ret |= finish_fault(vmf);
5302         folio = page_folio(vmf->page);        !! 3426         unlock_page(vmf->page);
5303         folio_unlock(folio);                  << 
5304         if (unlikely(ret & (VM_FAULT_ERROR |     3427         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5305                 folio_put(folio);             !! 3428                 put_page(vmf->page);
5306         return ret;                              3429         return ret;
5307 }                                                3430 }
5308                                                  3431 
5309 static vm_fault_t do_cow_fault(struct vm_fault *vmf) !! 3432 static int do_cow_fault(struct vm_fault *vmf)
5310 {                                                3433 {
5311         struct vm_area_struct *vma = vmf->vma    3434         struct vm_area_struct *vma = vmf->vma;
5312         struct folio *folio;                  !! 3435         int ret;
5313         vm_fault_t ret;                       << 
5314                                                  3436 
5315         ret = vmf_can_call_fault(vmf);        !! 3437         if (unlikely(anon_vma_prepare(vma)))
5316         if (!ret)                             !! 3438                 return VM_FAULT_OOM;
5317                 ret = vmf_anon_prepare(vmf);  << 
5318         if (ret)                              << 
5319                 return ret;                   << 
5320                                                  3439 
5321         folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false); !! 3440         vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
5322         if (!folio)                           !! 3441         if (!vmf->cow_page)
5323                 return VM_FAULT_OOM;             3442                 return VM_FAULT_OOM;
5324                                                  3443 
5325         vmf->cow_page = &folio->page;         !! 3444         if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
                                                   >> 3445                                 &vmf->memcg, false)) {
                                                   >> 3446                 put_page(vmf->cow_page);
                                                   >> 3447                 return VM_FAULT_OOM;
                                                   >> 3448         }
5326                                                  3449 
5327         ret = __do_fault(vmf);                   3450         ret = __do_fault(vmf);
5328         if (unlikely(ret & (VM_FAULT_ERROR |     3451         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5329                 goto uncharge_out;               3452                 goto uncharge_out;
5330         if (ret & VM_FAULT_DONE_COW)             3453         if (ret & VM_FAULT_DONE_COW)
5331                 return ret;                      3454                 return ret;
5332                                                  3455 
5333         if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) { !! 3456         copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
5334                 ret = VM_FAULT_HWPOISON;      !! 3457         __SetPageUptodate(vmf->cow_page);
5335                 goto unlock;                  << 
5336         }                                     << 
5337         __folio_mark_uptodate(folio);         << 
5338                                                  3458 
5339         ret |= finish_fault(vmf);                3459         ret |= finish_fault(vmf);
5340 unlock:                                       << 
5341         unlock_page(vmf->page);                  3460         unlock_page(vmf->page);
5342         put_page(vmf->page);                     3461         put_page(vmf->page);
5343         if (unlikely(ret & (VM_FAULT_ERROR |     3462         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5344                 goto uncharge_out;               3463                 goto uncharge_out;
5345         return ret;                              3464         return ret;
5346 uncharge_out:                                    3465 uncharge_out:
5347         folio_put(folio);                     !! 3466         mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false);
                                                   >> 3467         put_page(vmf->cow_page);
5348         return ret;                              3468         return ret;
5349 }                                                3469 }
5350                                                  3470 
5351 static vm_fault_t do_shared_fault(struct vm_fault *vmf) !! 3471 static int do_shared_fault(struct vm_fault *vmf)
5352 {                                                3472 {
5353         struct vm_area_struct *vma = vmf->vma    3473         struct vm_area_struct *vma = vmf->vma;
5354         vm_fault_t ret, tmp;                  !! 3474         int ret, tmp;
5355         struct folio *folio;                  << 
5356                                               << 
5357         ret = vmf_can_call_fault(vmf);        << 
5358         if (ret)                              << 
5359                 return ret;                   << 
5360                                                  3475 
5361         ret = __do_fault(vmf);                   3476         ret = __do_fault(vmf);
5362         if (unlikely(ret & (VM_FAULT_ERROR |     3477         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5363                 return ret;                      3478                 return ret;
5364                                                  3479 
5365         folio = page_folio(vmf->page);        << 
5366                                               << 
5367         /*                                       3480         /*
5368          * Check if the backing address space    3481          * Check if the backing address space wants to know that the page is
5369          * about to become writable              3482          * about to become writable
5370          */                                      3483          */
5371         if (vma->vm_ops->page_mkwrite) {         3484         if (vma->vm_ops->page_mkwrite) {
5372                 folio_unlock(folio);          !! 3485                 unlock_page(vmf->page);
5373                 tmp = do_page_mkwrite(vmf, folio); !! 3486                 tmp = do_page_mkwrite(vmf);
5374                 if (unlikely(!tmp ||             3487                 if (unlikely(!tmp ||
5375                                 (tmp & (VM_FA    3488                                 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
5376                         folio_put(folio);     !! 3489                         put_page(vmf->page);
5377                         return tmp;              3490                         return tmp;
5378                 }                                3491                 }
5379         }                                        3492         }
5380                                                  3493 
5381         ret |= finish_fault(vmf);                3494         ret |= finish_fault(vmf);
5382         if (unlikely(ret & (VM_FAULT_ERROR |     3495         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
5383                                         VM_FA    3496                                         VM_FAULT_RETRY))) {
5384                 folio_unlock(folio);          !! 3497                 unlock_page(vmf->page);
5385                 folio_put(folio);             !! 3498                 put_page(vmf->page);
5386                 return ret;                      3499                 return ret;
5387         }                                        3500         }
5388                                                  3501 
5389         ret |= fault_dirty_shared_page(vmf);  !! 3502         fault_dirty_shared_page(vma, vmf->page);
5390         return ret;                              3503         return ret;
5391 }                                                3504 }
5392                                                  3505 
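
do_read_fault(), do_cow_fault() and do_shared_fault() only drive callbacks; the backing store supplies them through vm_operations_struct. A hedged sketch of the usual wiring (demo_vm_ops and demo_mmap are hypothetical names; the filemap_* helpers are the generic page cache implementations most filesystems plug in here):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical driver glue: the three hooks the do_*_fault() handlers
 * above invoke, backed by the generic page cache implementations. */
static const struct vm_operations_struct demo_vm_ops = {
	.fault		= filemap_fault,	/* may sleep and start I/O */
	.map_pages	= filemap_map_pages,	/* best effort, skips !uptodate pages */
	.page_mkwrite	= filemap_page_mkwrite,	/* write-notify for dirty tracking */
};

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &demo_vm_ops;
	return 0;
}
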
5393 /*                                               3506 /*
5394  * We enter with non-exclusive mmap_lock (to exclude vma changes, !! 3507  * We enter with non-exclusive mmap_sem (to exclude vma changes,
5395  * but allow concurrent faults).                 3508  * but allow concurrent faults).
5396  * The mmap_lock may have been released depending on flags and our !! 3509  * The mmap_sem may have been released depending on flags and our
5397  * return value.  See filemap_fault() and __folio_lock_or_retry(). !! 3510  * return value.  See filemap_fault() and __lock_page_or_retry().
5398  * If mmap_lock is released, vma may become invalid (for example << 
5399  * by other thread calling munmap()).         << 
5400  */                                              3511  */
5401 static vm_fault_t do_fault(struct vm_fault *vmf) !! 3512 static int do_fault(struct vm_fault *vmf)
5402 {                                                3513 {
5403         struct vm_area_struct *vma = vmf->vma    3514         struct vm_area_struct *vma = vmf->vma;
5404         struct mm_struct *vm_mm = vma->vm_mm; !! 3515         int ret;
5405         vm_fault_t ret;                       << 
5406                                               << 
5407         /*                                    << 
5408          * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND << 
5409          */                                   << 
5410         if (!vma->vm_ops->fault) {            << 
5411                 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, << 
5412                                                vmf->address, &vmf->ptl); << 
5413                 if (unlikely(!vmf->pte))      << 
5414                         ret = VM_FAULT_SIGBUS; << 
5415                 else {                        << 
5416                         /*                    << 
5417                          * Make sure this is not a temporary clearing of pte << 
5418                          * by holding ptl and checking again. A R/M/W update << 
5419                          * of pte involves: take ptl, clearing the pte so that << 
5420                          * we don't have concurrent modification by hardware << 
5421                          * followed by an update. << 
5422                          */                   << 
5423                         if (unlikely(pte_none(ptep_get(vmf->pte)))) << 
5424                                 ret = VM_FAULT_SIGBUS; << 
5425                         else                  << 
5426                                 ret = VM_FAULT_NOPAGE; << 
5427                                                  3516 
5428                         pte_unmap_unlock(vmf->pte, vmf->ptl); !! 3517         /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
5429                 }                             !! 3518         if (!vma->vm_ops->fault)
5430         } else if (!(vmf->flags & FAULT_FLAG_WRITE)) !! 3519                 ret = VM_FAULT_SIGBUS;
                                                   >> 3520         else if (!(vmf->flags & FAULT_FLAG_WRITE))
5431                 ret = do_read_fault(vmf);        3521                 ret = do_read_fault(vmf);
5432         else if (!(vma->vm_flags & VM_SHARED)    3522         else if (!(vma->vm_flags & VM_SHARED))
5433                 ret = do_cow_fault(vmf);         3523                 ret = do_cow_fault(vmf);
5434         else                                     3524         else
5435                 ret = do_shared_fault(vmf);      3525                 ret = do_shared_fault(vmf);
5436                                                  3526 
5437         /* preallocated pagetable is unused:     3527         /* preallocated pagetable is unused: free it */
5438         if (vmf->prealloc_pte) {                 3528         if (vmf->prealloc_pte) {
5439                 pte_free(vm_mm, vmf->prealloc_pte); !! 3529                 pte_free(vma->vm_mm, vmf->prealloc_pte);
5440                 vmf->prealloc_pte = NULL;        3530                 vmf->prealloc_pte = NULL;
5441         }                                        3531         }
5442         return ret;                              3532         return ret;
5443 }                                                3533 }
5444                                                  3534 
5445 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf, !! 3535 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
5446                       unsigned long addr, int *flags, !! 3536                                 unsigned long addr, int page_nid,
5447                       bool writable, int *last_cpupid) !! 3537                                 int *flags)
5448 {                                                3538 {
5449         struct vm_area_struct *vma = vmf->vma; !! 3539         get_page(page);
5450                                               << 
5451         /*                                    << 
5452          * Avoid grouping on RO pages in general. RO pages shouldn't hurt as << 
5453          * much anyway since they can be in shared cache state. This misses << 
5454          * the case where a mapping is writable but the process never writes << 
5455          * to it but pte_write gets cleared during protection updates and << 
5456          * pte_dirty has unpredictable behaviour between PTE scan updates, << 
5457          * background writeback, dirty balancing and application behaviour. << 
5458          */                                   << 
5459         if (!writable)                        << 
5460                 *flags |= TNF_NO_GROUP;       << 
5461                                               << 
5462         /*                                    << 
5463          * Flag if the folio is shared between multiple address spaces. This << 
5464          * is later used when determining whether to group tasks together << 
5465          */                                   << 
5466         if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED)) << 
5467                 *flags |= TNF_SHARED;         << 
5468         /*                                    << 
5469          * For memory tiering mode, cpupid of slow memory page is used << 
5470          * to record page access time.  So use default value. << 
5471          */                                   << 
5472         if (folio_use_access_time(folio))     << 
5473                 *last_cpupid = (-1 & LAST_CPUPID_MASK); << 
5474         else                                  << 
5475                 *last_cpupid = folio_last_cpupid(folio); << 
5476                                               << 
5477         /* Record the current PID accessing VMA */ << 
5478         vma_set_access_pid_bit(vma);          << 
5479                                                  3540 
5480         count_vm_numa_event(NUMA_HINT_FAULTS)    3541         count_vm_numa_event(NUMA_HINT_FAULTS);
5481 #ifdef CONFIG_NUMA_BALANCING                  !! 3542         if (page_nid == numa_node_id()) {
5482         count_memcg_folio_events(folio, NUMA_HINT_FAULTS, 1); << 
5483 #endif                                        << 
5484         if (folio_nid(folio) == numa_node_id()) { !! 3542         if (page_nid == numa_node_id()) {
5485                 count_vm_numa_event(NUMA_HINT    3543                 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
5486                 *flags |= TNF_FAULT_LOCAL;       3544                 *flags |= TNF_FAULT_LOCAL;
5487         }                                        3545         }
5488                                                  3546 
5489         return mpol_misplaced(folio, vmf, addr); !! 3547         return mpol_misplaced(page, vma, addr);
5490 }                                                3548 }
5491                                                  3549 
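
numa_migrate_check() is essentially asking where the folio is versus where policy says it should be. Userspace can pose the "which node backs this page" half of that question with move_pages(2) and a NULL nodes array; a hedged sketch (link with -lnuma):

/*
 * Hedged userspace sketch: query the backing node of one page,
 * the same residency question numa_migrate_check() answers kernel-side.
 */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *buf = aligned_alloc(page_size, page_size);
	void *pages[1] = { buf };
	int status[1];

	*(volatile char *)buf = 1;	/* fault the page in first */
	if (move_pages(0, 1, pages, NULL, status, 0))
		perror("move_pages");
	else
		printf("page resides on node %d\n", status[0]);
	free(buf);
	return 0;
}
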
5492 static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, !! 3550 static int do_numa_page(struct vm_fault *vmf)
5493                                         unsigned long fault_addr, pte_t *fault_pte, << 
5494                                         bool writable) << 
5495 {                                             << 
5496         pte_t pte, old_pte;                   << 
5497                                               << 
5498         old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte); << 
5499         pte = pte_modify(old_pte, vma->vm_page_prot); << 
5500         pte = pte_mkyoung(pte);               << 
5501         if (writable)                         << 
5502                 pte = pte_mkwrite(pte, vma);  << 
5503         ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte); << 
5504         update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1); << 
5505 }                                             << 
5506                                               << 
5507 static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, << 
5508                                        struct folio *folio, pte_t fault_pte, << 
5509                                        bool ignore_writable, bool pte_write_upgrade) << 
5510 {                                             << 
5511         int nr = pte_pfn(fault_pte) - folio_pfn(folio); << 
5512         unsigned long start, end, addr = vmf->address; << 
5513         unsigned long addr_start = addr - (nr << PAGE_SHIFT); << 
5514         unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE); << 
5515         pte_t *start_ptep;                    << 
5516                                               << 
5517         /* Stay within the VMA and within the page table. */ << 
5518         start = max3(addr_start, pt_start, vma->vm_start); << 
5519         end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE, << 
5520                    vma->vm_end);              << 
5521         start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT); << 
5522                                               << 
5523         /* Restore all PTEs' mapping of the large folio */ << 
5524         for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) { << 
5525                 pte_t ptent = ptep_get(start_ptep); << 
5526                 bool writable = false;        << 
5527                                               << 
5528                 if (!pte_present(ptent) || !pte_protnone(ptent)) << 
5529                         continue;             << 
5530                                               << 
5531                 if (pfn_folio(pte_pfn(ptent)) != folio) << 
5532                         continue;             << 
5533                                               << 
5534                 if (!ignore_writable) {       << 
5535                         ptent = pte_modify(ptent, vma->vm_page_prot); << 
5536                         writable = pte_write(ptent); << 
5537                         if (!writable && pte_write_upgrade && << 
5538                             can_change_pte_writable(vma, addr, ptent)) << 
5539                                 writable = true; << 
5540                 }                             << 
5541                                               << 
5542                 numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable); << 
5543         }                                     << 
5544 }                                             << 
5545                                               << 
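
numa_rebuild_large_mapping() repeats the clamp-to-VMA-and-page-table pattern from do_fault_around(), plus a pointer back-off from vmf->pte to the first entry of the window. A standalone model of that arithmetic, with hypothetical addresses throughout:

/*
 * Hedged userspace model of numa_rebuild_large_mapping()'s clamping;
 * every address and size below is made up for illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT	12UL
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SIZE	(512 * PAGE_SIZE)
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned long vm_start = 0x7f0000002000UL;
	unsigned long vm_end   = 0x7f0000020000UL;
	unsigned long addr     = 0x7f0000005000UL;	/* faulting address */
	unsigned long folio_sz = 16 * PAGE_SIZE;	/* 16-page large folio */
	int nr = 3;			/* fault hits page 3 of the folio */

	unsigned long addr_start = addr - ((unsigned long)nr << PAGE_SHIFT);
	unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE);

	/* Stay within the VMA and within the page table. */
	unsigned long start = MAX(MAX(addr_start, pt_start), vm_start);
	unsigned long end = MIN(MIN(addr_start + folio_sz, pt_start + PMD_SIZE),
				vm_end);

	printf("restore [%#lx, %#lx), back off %lu PTEs from vmf->pte\n",
	       start, end, (addr - start) >> PAGE_SHIFT);
	return 0;
}
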
5546 static vm_fault_t do_numa_page(struct vm_fault *vmf) << 
5547 {                                                3551 {
5548         struct vm_area_struct *vma = vmf->vma    3552         struct vm_area_struct *vma = vmf->vma;
5549         struct folio *folio = NULL;           !! 3553         struct page *page = NULL;
5550         int nid = NUMA_NO_NODE;               !! 3554         int page_nid = -1;
5551         bool writable = false, ignore_writable = false; << 
5552         bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma); << 
5553         int last_cpupid;                         3555         int last_cpupid;
5554         int target_nid;                          3556         int target_nid;
5555         pte_t pte, old_pte;                   !! 3557         bool migrated = false;
5556         int flags = 0, nr_pages;              !! 3558         pte_t pte;
                                                   >> 3559         bool was_writable = pte_savedwrite(vmf->orig_pte);
                                                   >> 3560         int flags = 0;
5557                                                  3561 
5558         /*                                       3562         /*
5559          * The pte cannot be used safely until we verify, while holding the page !! 3563          * The "pte" at this point cannot be used safely without
5560          * table lock, that its contents have not changed during fault handling. !! 3564          * validation through pte_unmap_same(). It's of NUMA type but
                                                   >> 3565          * the pfn may be screwed if the read is non atomic.
5561          */                                      3566          */
                                                   >> 3567         vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
5562         spin_lock(vmf->ptl);                     3568         spin_lock(vmf->ptl);
5563         /* Read the live PTE from the page table: */ !! 3569         if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
5564         old_pte = ptep_get(vmf->pte);         << 
5565                                               << 
5566         if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { << 
5567                 pte_unmap_unlock(vmf->pte, vm    3570                 pte_unmap_unlock(vmf->pte, vmf->ptl);
5568                 return 0;                     !! 3571                 goto out;
5569         }                                        3572         }
5570                                                  3573 
5571         pte = pte_modify(old_pte, vma->vm_page_prot); << 
5572                                               << 
5573         /*                                       3574         /*
5574          * Detect now whether the PTE could be writable; this information !! 3575          * Make it present again, Depending on how arch implementes non
5575          * is only valid while holding the PT lock. !! 3576          * accessible ptes, some can allow access by kernel mode.
5576          */                                      3577          */
5577         writable = pte_write(pte);            !! 3578         pte = ptep_modify_prot_start(vma->vm_mm, vmf->address, vmf->pte);
5578         if (!writable && pte_write_upgrade && !! 3579         pte = pte_modify(pte, vma->vm_page_prot);
5579             can_change_pte_writable(vma, vmf->address, pte)) !! 3580         pte = pte_mkyoung(pte);
5580                 writable = true;              !! 3581         if (was_writable)
5581                                               !! 3582                 pte = pte_mkwrite(pte);
5582         folio = vm_normal_folio(vma, vmf->address, pte); !! 3583         ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte);
5583         if (!folio || folio_is_zone_device(folio)) !! 3584         update_mmu_cache(vma, vmf->address, vmf->pte);
5584                 goto out_map;                 << 
5585                                               << 
5586         nid = folio_nid(folio);               << 
5587         nr_pages = folio_nr_pages(folio);     << 
5588                                               << 
5589         target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags, << 
5590                                         writable, &last_cpupid); << 
5591         if (target_nid == NUMA_NO_NODE)       << 
5592                 goto out_map;                 << 
5593         if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) { << 
5594                 flags |= TNF_MIGRATE_FAIL;    << 
5595                 goto out_map;                 << 
5596         }                                     << 
5597         /* The folio is isolated and isolation code holds a folio reference. */ << 
5598         pte_unmap_unlock(vmf->pte, vmf->ptl); << 
5599         writable = false;                     << 
5600         ignore_writable = true;               << 
5601                                                  3585 
5602         /* Migrate to the requested node */   !! 3586         page = vm_normal_page(vma, vmf->address, pte);
5603         if (!migrate_misplaced_folio(folio, vma, target_nid)) { !! 3587         if (!page) {
5604                 nid = target_nid;             !! 3588                 pte_unmap_unlock(vmf->pte, vmf->ptl);
5605                 flags |= TNF_MIGRATED;        << 
5606                 task_numa_fault(last_cpupid, nid, nr_pages, flags); << 
5607                 return 0;                        3589                 return 0;
5608         }                                        3590         }
5609                                                  3591 
5610         flags |= TNF_MIGRATE_FAIL;            !! 3592         /* TODO: handle PTE-mapped THP */
5611         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, !! 3593         if (PageCompound(page)) {
5612                                        vmf->address, &vmf->ptl); << 
5613         if (unlikely(!vmf->pte))              << 
5614                 return 0;                     << 
5615         if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { << 
5616                 pte_unmap_unlock(vmf->pte, vm    3594                 pte_unmap_unlock(vmf->pte, vmf->ptl);
5617                 return 0;                        3595                 return 0;
5618         }                                        3596         }
5619 out_map:                                      !! 3597 
5620         /*                                       3598         /*
5621          * Make it present again, depending on how arch implements !! 3599          * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
5622          * non-accessible ptes, some can allow access by kernel mode. !! 3600          * much anyway since they can be in shared cache state. This misses
                                                   >> 3601          * the case where a mapping is writable but the process never writes
                                                   >> 3602          * to it but pte_write gets cleared during protection updates and
                                                   >> 3603          * pte_dirty has unpredictable behaviour between PTE scan updates,
                                                   >> 3604          * background writeback, dirty balancing and application behaviour.
5623          */                                      3605          */
5624         if (folio && folio_test_large(folio)) !! 3606         if (!pte_write(pte))
5625                 numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable, !! 3607                 flags |= TNF_NO_GROUP;
5626                                            pte_write_upgrade); !! 3608 
5627         else                                  !! 3609         /*
5628                 numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, !! 3610          * Flag if the page is shared between multiple address spaces. This
5629                                             writable); !! 3611          * is later used when determining whether to group tasks together
                                                   >> 3612          */
                                                   >> 3613         if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
                                                   >> 3614                 flags |= TNF_SHARED;
                                                   >> 3615 
                                                   >> 3616         last_cpupid = page_cpupid_last(page);
                                                   >> 3617         page_nid = page_to_nid(page);
                                                   >> 3618         target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
                                                   >> 3619                         &flags);
5630         pte_unmap_unlock(vmf->pte, vmf->ptl);    3620         pte_unmap_unlock(vmf->pte, vmf->ptl);
                                                   >> 3621         if (target_nid == -1) {
                                                   >> 3622                 put_page(page);
                                                   >> 3623                 goto out;
                                                   >> 3624         }
5631                                                  3625 
5632         if (nid != NUMA_NO_NODE)              !! 3626         /* Migrate to the requested node */
5633                 task_numa_fault(last_cpupid, nid, nr_pages, flags); !! 3627         migrated = migrate_misplaced_page(page, vma, target_nid);
                                                   >> 3628         if (migrated) {
                                                   >> 3629                 page_nid = target_nid;
                                                   >> 3630                 flags |= TNF_MIGRATED;
                                                   >> 3631         } else
                                                   >> 3632                 flags |= TNF_MIGRATE_FAIL;
                                                   >> 3633 
                                                   >> 3634 out:
                                                   >> 3635         if (page_nid != -1)
                                                   >> 3636                 task_numa_fault(last_cpupid, page_nid, 1, flags);
5634         return 0;                                3637         return 0;
5635 }                                                3638 }
5636                                                  3639 
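
The NUMA_HINT_FAULTS* events bumped in numa_migrate_check() and the migrations performed here surface in /proc/vmstat, which is the easiest way to watch this path fire. A small reader, assuming CONFIG_NUMA_BALANCING is enabled:

/*
 * Hedged userspace sketch: print the NUMA balancing counters that
 * do_numa_page() and numa_migrate_check() feed.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) { perror("fopen"); return 1; }
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "numa_hint_faults", 16) ||
		    !strncmp(line, "numa_pages_migrated", 19))
			fputs(line, stdout);
	fclose(f);
	return 0;
}
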
5637 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) !! 3640 static inline int create_huge_pmd(struct vm_fault *vmf)
5638 {                                                3641 {
5639         struct vm_area_struct *vma = vmf->vma; !! 3642         if (vma_is_anonymous(vmf->vma))
5640         if (vma_is_anonymous(vma))            << 
5641                 return do_huge_pmd_anonymous_    3643                 return do_huge_pmd_anonymous_page(vmf);
5642         if (vma->vm_ops->huge_fault)          !! 3644         if (vmf->vma->vm_ops->huge_fault)
5643                 return vma->vm_ops->huge_fault(vmf, PMD_ORDER); !! 3645                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
5644         return VM_FAULT_FALLBACK;                3646         return VM_FAULT_FALLBACK;
5645 }                                                3647 }
5646                                                  3648 
5647 /* `inline' is required to avoid gcc 4.1.2 build error */ !! 3649 static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
5648 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf) << 
5649 {                                                3650 {
5650         struct vm_area_struct *vma = vmf->vma; !! 3651         if (vma_is_anonymous(vmf->vma))
5651         const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; !! 3652                 return do_huge_pmd_wp_page(vmf, orig_pmd);
5652         vm_fault_t ret;                       !! 3653         if (vmf->vma->vm_ops->huge_fault)
5653                                               !! 3654                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
5654         if (vma_is_anonymous(vma)) {          !! 3655 
5655                 if (likely(!unshare) &&       !! 3656         /* COW handled on pte level: split pmd */
5656                     userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) { !! 3657         VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);
5657                         if (userfaultfd_wp_async(vmf->vma)) !! 3658         __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
5658                                 goto split;   << 
5659                         return handle_userfault(vmf, VM_UFFD_WP); << 
5660                 }                             << 
5661                 return do_huge_pmd_wp_page(vmf); << 
5662         }                                     << 
5663                                               << 
5664         if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { << 
5665                 if (vma->vm_ops->huge_fault) { << 
5666                         ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER); << 
5667                         if (!(ret & VM_FAULT_FALLBACK)) << 
5668                                 return ret;   << 
5669                 }                             << 
5670         }                                     << 
5671                                               << 
5672 split:                                        << 
5673         /* COW or write-notify handled on pte level: split pmd. */ << 
5674         __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); << 
5675                                                  3659 
5676         return VM_FAULT_FALLBACK;                3660         return VM_FAULT_FALLBACK;
5677 }                                                3661 }
5678                                                  3662 
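
create_huge_pmd() and wp_huge_pmd() fall back to PTE granularity whenever ->huge_fault is absent or returns VM_FAULT_FALLBACK. A hedged sketch of the hook from the provider's side (demo_huge_fault is a hypothetical handler; real ones, e.g. in DAX filesystems, install the mapping where the comment stands):

/* Hypothetical ->huge_fault implementation: serve PMD-sized faults
 * and punt every other order back, so the core splits or falls back. */
static vm_fault_t demo_huge_fault(struct vm_fault *vmf, unsigned int order)
{
	if (order != PMD_ORDER)
		return VM_FAULT_FALLBACK;

	/* ... look up backing storage and install a PMD mapping here ... */

	return VM_FAULT_NOPAGE;
}
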
5679 static vm_fault_t create_huge_pud(struct vm_fault *vmf) !! 3663 static inline bool vma_is_accessible(struct vm_area_struct *vma)
5680 {                                                3664 {
5681 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&                    \ !! 3665         return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
5682         defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) !! 3666 }
5683         struct vm_area_struct *vma = vmf->vma; !! 3667 
                                                   >> 3668 static int create_huge_pud(struct vm_fault *vmf)
                                                   >> 3669 {
                                                   >> 3670 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5684         /* No support for anonymous transpare    3671         /* No support for anonymous transparent PUD pages yet */
5685         if (vma_is_anonymous(vma))            !! 3672         if (vma_is_anonymous(vmf->vma))
5686                 return VM_FAULT_FALLBACK;        3673                 return VM_FAULT_FALLBACK;
5687         if (vma->vm_ops->huge_fault)          !! 3674         if (vmf->vma->vm_ops->huge_fault)
5688                 return vma->vm_ops->huge_fault(vmf, PUD_ORDER); !! 3675                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
5689 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */         3676 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
5690         return VM_FAULT_FALLBACK;                3677         return VM_FAULT_FALLBACK;
5691 }                                                3678 }
5692                                                  3679 
5693 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) !! 3680 static int wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
5694 {                                                3681 {
5695 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&                    \ !! 3682 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5696         defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) << 
5697         struct vm_area_struct *vma = vmf->vma; << 
5698         vm_fault_t ret;                       << 
5699                                               << 
5700         /* No support for anonymous transpare    3683         /* No support for anonymous transparent PUD pages yet */
5701         if (vma_is_anonymous(vma))            !! 3684         if (vma_is_anonymous(vmf->vma))
5702                 goto split;                   !! 3685                 return VM_FAULT_FALLBACK;
5703         if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { !! 3686         if (vmf->vma->vm_ops->huge_fault)
5704                 if (vma->vm_ops->huge_fault) { !! 3687                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
5705                         ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER); !! 3688 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
5706                         if (!(ret & VM_FAULT_FALLBACK)) << 
5707                                 return ret;   << 
5708                 }                             << 
5709         }                                     << 
5710 split:                                        << 
5711         /* COW or write-notify not handled on PUD level: split pud.*/ << 
5712         __split_huge_pud(vma, vmf->pud, vmf->address); << 
5713 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ << 
5714         return VM_FAULT_FALLBACK;                3689         return VM_FAULT_FALLBACK;
5715 }                                                3690 }
5716                                                  3691 
5717 /*                                               3692 /*
5718  * These routines also need to handle stuff l    3693  * These routines also need to handle stuff like marking pages dirty
5719  * and/or accessed for architectures that don    3694  * and/or accessed for architectures that don't do it in hardware (most
5720  * RISC architectures).  The early dirtying i    3695  * RISC architectures).  The early dirtying is also good on the i386.
5721  *                                               3696  *
5722  * There is also a hook called "update_mmu_ca    3697  * There is also a hook called "update_mmu_cache()" that architectures
5723  * with external mmu caches can use to update    3698  * with external mmu caches can use to update those (ie the Sparc or
5724  * PowerPC hashed page tables that act as ext    3699  * PowerPC hashed page tables that act as extended TLBs).
5725  *                                               3700  *
5726  * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow !! 3701  * We enter with non-exclusive mmap_sem (to exclude vma changes, but allow
5727  * concurrent faults).                           3702  * concurrent faults).
5728  *                                               3703  *
5729  * The mmap_lock may have been released depending on flags and our return value. !! 3704  * The mmap_sem may have been released depending on flags and our return value.
5730  * See filemap_fault() and __folio_lock_or_retry(). !! 3705  * See filemap_fault() and __lock_page_or_retry().
5731  */                                              3706  */
5732 static vm_fault_t handle_pte_fault(struct vm_fault *vmf) !! 3707 static int handle_pte_fault(struct vm_fault *vmf)
5733 {                                                3708 {
5734         pte_t entry;                             3709         pte_t entry;
5735                                                  3710 
5736         if (unlikely(pmd_none(*vmf->pmd))) {     3711         if (unlikely(pmd_none(*vmf->pmd))) {
5737                 /*                               3712                 /*
5738                  * Leave __pte_alloc() until     3713                  * Leave __pte_alloc() until later: because vm_ops->fault may
5739                  * want to allocate huge page    3714                  * want to allocate huge page, and if we expose page table
5740                  * for an instant, it will be    3715                  * for an instant, it will be difficult to retract from
5741                  * concurrent faults and from    3716                  * concurrent faults and from rmap lookups.
5742                  */                              3717                  */
5743                 vmf->pte = NULL;                 3718                 vmf->pte = NULL;
5744                 vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID; << 
5745         } else {                                 3719         } else {
                                                   >> 3720                 /* See comment in pte_alloc_one_map() */
                                                   >> 3721                 if (pmd_devmap_trans_unstable(vmf->pmd))
                                                   >> 3722                         return 0;
5746                 /*                               3723                 /*
5747                  * A regular pmd is establish    3724                  * A regular pmd is established and it can't morph into a huge
5748                  * pmd by anon khugepaged, since that takes mmap_lock in write !! 3725                  * pmd from under us anymore at this point because we hold the
5749                  * mode; but shmem or file collapse to THP could still morph !! 3726                  * mmap_sem read mode and khugepaged takes it in write mode.
5750                  * it into a huge pmd: just retry later if so. !! 3727                  * So now it's safe to run pte_offset_map().
5751                  */                              3728                  */
5752                 vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd, !! 3729                 vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
5753                                                  vmf->address, &vmf->ptl); !! 3730                 vmf->orig_pte = *vmf->pte;
5754                 if (unlikely(!vmf->pte))      << 
5755                         return 0;             << 
5756                 vmf->orig_pte = ptep_get_lockless(vmf->pte); << 
5757                 vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID; << 
5758                                                  3731 
                                                   >> 3732                 /*
                                                   >> 3733                  * some architectures can have larger ptes than wordsize,
                                                   >> 3734                  * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and
                                                   >> 3735                  * CONFIG_32BIT=y, so READ_ONCE or ACCESS_ONCE cannot guarantee
                                                   >> 3736                  * atomic accesses.  The code below just needs a consistent
                                                   >> 3737                  * view for the ifs and we later double check anyway with the
                                                   >> 3738                  * ptl lock held. So here a barrier will do.
                                                   >> 3739                  */
                                                   >> 3740                 barrier();
5759                 if (pte_none(vmf->orig_pte))     3741                 if (pte_none(vmf->orig_pte)) {
5760                         pte_unmap(vmf->pte);     3742                         pte_unmap(vmf->pte);
5761                         vmf->pte = NULL;         3743                         vmf->pte = NULL;
5762                 }                                3744                 }
5763         }                                        3745         }
5764                                                  3746 
5765         if (!vmf->pte)                        !! 3747         if (!vmf->pte) {
5766                 return do_pte_missing(vmf);   !! 3748                 if (vma_is_anonymous(vmf->vma))
                                                   >> 3749                         return do_anonymous_page(vmf);
                                                   >> 3750                 else
                                                   >> 3751                         return do_fault(vmf);
                                                   >> 3752         }
5767                                                  3753 
5768         if (!pte_present(vmf->orig_pte))         3754         if (!pte_present(vmf->orig_pte))
5769                 return do_swap_page(vmf);        3755                 return do_swap_page(vmf);
5770                                                  3756 
5771         if (pte_protnone(vmf->orig_pte) && vm    3757         if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
5772                 return do_numa_page(vmf);        3758                 return do_numa_page(vmf);
5773                                                  3759 
                                                   >> 3760         vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
5774         spin_lock(vmf->ptl);                     3761         spin_lock(vmf->ptl);
5775         entry = vmf->orig_pte;                   3762         entry = vmf->orig_pte;
5776         if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) { !! 3763         if (unlikely(!pte_same(*vmf->pte, entry)))
5777                 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); << 
5778                 goto unlock;                     3764                 goto unlock;
5779         }                                     !! 3765         if (vmf->flags & FAULT_FLAG_WRITE) {
5780         if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { << 
5781                 if (!pte_write(entry))           3766                 if (!pte_write(entry))
5782                         return do_wp_page(vmf    3767                         return do_wp_page(vmf);
5783                 else if (likely(vmf->flags & FAULT_FLAG_WRITE)) !! 3768                 entry = pte_mkdirty(entry);
5784                         entry = pte_mkdirty(entry); << 
5785         }                                        3769         }
5786         entry = pte_mkyoung(entry);              3770         entry = pte_mkyoung(entry);
5787         if (ptep_set_access_flags(vmf->vma, v    3771         if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
5788                                 vmf->flags &     3772                                 vmf->flags & FAULT_FLAG_WRITE)) {
5789                 update_mmu_cache_range(vmf, vmf->vma, vmf->address, !! 3773                 update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
5790                                 vmf->pte, 1); << 
5791         } else {                                 3774         } else {
5792                 /* Skip spurious TLB flush for retried page fault */ << 
5793                 if (vmf->flags & FAULT_FLAG_TRIED) << 
5794                         goto unlock;          << 
5795                 /*                               3775                 /*
5796                  * This is needed only for pr    3776                  * This is needed only for protection faults but the arch code
5797                  * is not yet telling us if t    3777                  * is not yet telling us if this is a protection fault or not.
5798                  * This still avoids useless     3778                  * This still avoids useless tlb flushes for .text page faults
5799                  * with threads.                 3779                  * with threads.
5800                  */                              3780                  */
5801                 if (vmf->flags & FAULT_FLAG_W    3781                 if (vmf->flags & FAULT_FLAG_WRITE)
5802                         flush_tlb_fix_spurious_fault(vmf->vma, vmf->address, !! 3782                         flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
5803                                                      vmf->pte); << 
5804         }                                        3783         }
5805 unlock:                                          3784 unlock:
5806         pte_unmap_unlock(vmf->pte, vmf->ptl);    3785         pte_unmap_unlock(vmf->pte, vmf->ptl);
5807         return 0;                                3786         return 0;
5808 }                                                3787 }
5809                                                  3788 
5810 /*                                               3789 /*
5811  * On entry, we hold either the VMA lock or the mmap_lock !! 3790  * By the time we get here, we already hold the mm semaphore
5812  * (FAULT_FLAG_VMA_LOCK tells you which).  If VM_FAULT_RETRY is set in !! 3791  *
5813  * the result, the mmap_lock is not held on exit.  See filemap_fault() !! 3792  * The mmap_sem may have been released depending on flags and our
5814  * and __folio_lock_or_retry().               !! 3793  * return value.  See filemap_fault() and __lock_page_or_retry().
5815  */                                              3794  */
5816 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, !! 3795 static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
5817                 unsigned long address, unsigned int flags) !! 3796                 unsigned int flags)
5818 {                                                3797 {
5819         struct vm_fault vmf = {                  3798         struct vm_fault vmf = {
5820                 .vma = vma,                      3799                 .vma = vma,
5821                 .address = address & PAGE_MAS    3800                 .address = address & PAGE_MASK,
5822                 .real_address = address,      << 
5823                 .flags = flags,                  3801                 .flags = flags,
5824                 .pgoff = linear_page_index(vm    3802                 .pgoff = linear_page_index(vma, address),
5825                 .gfp_mask = __get_fault_gfp_m    3803                 .gfp_mask = __get_fault_gfp_mask(vma),
5826         };                                       3804         };
5827         struct mm_struct *mm = vma->vm_mm;       3805         struct mm_struct *mm = vma->vm_mm;
5828         unsigned long vm_flags = vma->vm_flags;                           <<
5829         pgd_t *pgd;                              3806         pgd_t *pgd;
5830         p4d_t *p4d;                              3807         p4d_t *p4d;
5831         vm_fault_t ret;                       !! 3808         int ret;
5832                                                  3809 
5833         pgd = pgd_offset(mm, address);           3810         pgd = pgd_offset(mm, address);
5834         p4d = p4d_alloc(mm, pgd, address);       3811         p4d = p4d_alloc(mm, pgd, address);
5835         if (!p4d)                                3812         if (!p4d)
5836                 return VM_FAULT_OOM;             3813                 return VM_FAULT_OOM;
5837                                                  3814 
5838         vmf.pud = pud_alloc(mm, p4d, address)    3815         vmf.pud = pud_alloc(mm, p4d, address);
5839         if (!vmf.pud)                            3816         if (!vmf.pud)
5840                 return VM_FAULT_OOM;             3817                 return VM_FAULT_OOM;
5841 retry_pud:                                    !! 3818         if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
5842         if (pud_none(*vmf.pud) &&             << 
5843             thp_vma_allowable_order(vma, vm_flags,                        <<
5844                                 TVA_IN_PF | TVA_ENFORCE_SYSFS, PUD_ORDER)) { <<
5845                 ret = create_huge_pud(&vmf);     3819                 ret = create_huge_pud(&vmf);
5846                 if (!(ret & VM_FAULT_FALLBACK    3820                 if (!(ret & VM_FAULT_FALLBACK))
5847                         return ret;              3821                         return ret;
5848         } else {                                 3822         } else {
5849                 pud_t orig_pud = *vmf.pud;       3823                 pud_t orig_pud = *vmf.pud;
5850                                                  3824 
5851                 barrier();                       3825                 barrier();
5852                 if (pud_trans_huge(orig_pud)     3826                 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
                                                   >> 3827                         unsigned int dirty = flags & FAULT_FLAG_WRITE;
5853                                                  3828 
5854                         /*                                                !! 3829                         /* NUMA case for anonymous PUDs would go here */
5855                          * TODO once we support anonymous PUDs: NUMA case and !! 3830 
5856                          * FAULT_FLAG_UNSHARE handling.                   !! 3831                         if (dirty && !pud_write(orig_pud)) {
5857                          */                                               <<
5858                         if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) { <<
5859                                 ret = wp_huge    3832                                 ret = wp_huge_pud(&vmf, orig_pud);
5860                                 if (!(ret & V    3833                                 if (!(ret & VM_FAULT_FALLBACK))
5861                                         retur    3834                                         return ret;
5862                         } else {                 3835                         } else {
5863                                 huge_pud_set_    3836                                 huge_pud_set_accessed(&vmf, orig_pud);
5864                                 return 0;        3837                                 return 0;
5865                         }                        3838                         }
5866                 }                                3839                 }
5867         }                                        3840         }
5868                                                  3841 
5869         vmf.pmd = pmd_alloc(mm, vmf.pud, addr    3842         vmf.pmd = pmd_alloc(mm, vmf.pud, address);
5870         if (!vmf.pmd)                            3843         if (!vmf.pmd)
5871                 return VM_FAULT_OOM;             3844                 return VM_FAULT_OOM;
5872                                               !! 3845         if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) {
5873         /* Huge pud page fault raced with pmd_alloc? */                   <<
5874         if (pud_trans_unstable(vmf.pud))      << 
5875                 goto retry_pud;               << 
5876                                               << 
5877         if (pmd_none(*vmf.pmd) &&             << 
5878             thp_vma_allowable_order(vma, vm_flags,                        <<
5879                                 TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER)) { <<
5880                 ret = create_huge_pmd(&vmf);     3846                 ret = create_huge_pmd(&vmf);
5881                 if (!(ret & VM_FAULT_FALLBACK    3847                 if (!(ret & VM_FAULT_FALLBACK))
5882                         return ret;              3848                         return ret;
5883         } else {                                 3849         } else {
5884                 vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);                !! 3850                 pmd_t orig_pmd = *vmf.pmd;
5885                                                  3851 
5886                 if (unlikely(is_swap_pmd(vmf.orig_pmd))) {                !! 3852                 barrier();
5887                         VM_BUG_ON(thp_migration_supported() &&            !! 3853                 if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
5888                                           !is_pmd_migration_entry(vmf.orig_pmd)); !! 3854                         if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
5889                         if (is_pmd_migration_entry(vmf.orig_pmd))         !! 3855                                 return do_huge_pmd_numa_page(&vmf, orig_pmd);
5890                                 pmd_migration_entry_wait(mm, vmf.pmd);    !! 3856 
5891                         return 0;                                         !! 3857                         if ((vmf.flags & FAULT_FLAG_WRITE) &&
5892                 }                                                         !! 3858                                         !pmd_write(orig_pmd)) {
5893                 if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) { !! 3859                                 ret = wp_huge_pmd(&vmf, orig_pmd);
5894                         if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) <<
5895                                 return do_huge_pmd_numa_page(&vmf);       <<
5896                                                                           <<
5897                         if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && <<
5898                             !pmd_write(vmf.orig_pmd)) {                   <<
5899                                 ret = wp_huge_pmd(&vmf);                  <<
5900                                 if (!(ret & V    3860                                 if (!(ret & VM_FAULT_FALLBACK))
5901                                         retur    3861                                         return ret;
5902                         } else {                 3862                         } else {
5903                                 huge_pmd_set_accessed(&vmf);              !! 3863                                 huge_pmd_set_accessed(&vmf, orig_pmd);
5904                                 return 0;        3864                                 return 0;
5905                         }                        3865                         }
5906                 }                                3866                 }
5907         }                                        3867         }
5908                                                  3868 
5909         return handle_pte_fault(&vmf);           3869         return handle_pte_fault(&vmf);
5910 }                                                3870 }
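
The control flow above leans on one idiom: a huge-page handler signals VM_FAULT_FALLBACK when the fault should be retried at a smaller page size, and any other result is final. A minimal illustrative sketch (not kernel code; the ex_ name is hypothetical):

static vm_fault_t ex_fault_with_fallback(struct vm_fault *vmf)
{
	/* Try to satisfy the fault with a PMD-sized mapping first. */
	vm_fault_t ret = create_huge_pmd(vmf);

	if (!(ret & VM_FAULT_FALLBACK))
		return ret;		/* handled (or failed) at the huge level */

	/* The huge path declined: retry at base-page granularity. */
	return handle_pte_fault(vmf);
}
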
5911                                                  3871 
5912 /**                                                                       <<
5913  * mm_account_fault - Do page fault accounting                            <<
5914  * @mm: mm from which memcg should be extracted. It can be NULL.          <<
5915  * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting <<
5916  *        of perf event counters, but we'll still do the per-task accounting to <<
5917  *        the task who triggered this page fault.                         <<
5918  * @address: the faulted address.                                         <<
5919  * @flags: the fault flags.                                               <<
5920  * @ret: the fault retcode.                                               <<
5921  *                                                                        <<
5922  * This will take care of most of the page fault accounting.  Meanwhile, it <<
5923  * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter <<
5924  * updates.  However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should <<
5925  * still be in per-arch page fault handlers at the entry of page fault.   <<
5926  */                                                                       <<
5927 static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs, <<
5928                                     unsigned long address, unsigned int flags, <<
5929                                     vm_fault_t ret)                       <<
5930 {                                                                         <<
5931         bool major;                                                       <<
5932                                                                           <<
5933         /* Incomplete faults will be accounted upon completion. */        <<
5934         if (ret & VM_FAULT_RETRY)                                         <<
5935                 return;                                                   <<
5936                                                                           <<
5937         /*                                                                <<
5938          * To preserve the behavior of older kernels, PGFAULT counters record <<
5939          * both successful and failed faults, as opposed to perf counters, <<
5940          * which ignore failed cases.                                     <<
5941          */                                                               <<
5942         count_vm_event(PGFAULT);                                          <<
5943         count_memcg_event_mm(mm, PGFAULT);                                <<
5944                                                                           <<
5945         /*                                                                <<
5946          * Do not account for unsuccessful faults (e.g. when the address wasn't <<
5947          * valid).  That includes arch_vma_access_permitted() failing before <<
5948          * reaching here. So this is not a "this many hardware page faults" <<
5949          * counter.  We should use the hw profiling for that.             <<
5950          */                                                               <<
5951         if (ret & VM_FAULT_ERROR)                                         <<
5952                 return;                                                   <<
5953                                                                           <<
5954         /*                                                                <<
5955          * We define the fault as a major fault when the final successful fault <<
5956          * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't <<
5957          * handle it immediately previously).                             <<
5958          */                                                               <<
5959         major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);     <<
5960                                                                           <<
5961         if (major)                                                        <<
5962                 current->maj_flt++;                                       <<
5963         else                                                              <<
5964                 current->min_flt++;                                       <<
5965                                                                           <<
5966         /*                                                                <<
5967          * If the fault is done for GUP, regs will be NULL.  We only do the <<
5968          * accounting for the per-thread fault counters of the task that  <<
5969          * triggered the fault, and we skip the perf event updates.       <<
5970          */                                                               <<
5971         if (!regs)                                                        <<
5972                 return;                                                   <<
5973                                                                           <<
5974         if (major)                                                        <<
5975                 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); <<
5976         else                                                              <<
5977                 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); <<
5978 }                                                                         <<
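
The maj_flt/min_flt counters bumped here are the same ones userspace reads back through getrusage(); a small self-contained check:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) != 0)
		return 1;
	/* Fed by the per-task min_flt/maj_flt accounting above. */
	printf("minor faults: %ld, major faults: %ld\n",
	       ru.ru_minflt, ru.ru_majflt);
	return 0;
}
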
5979                                               << 
5980 #ifdef CONFIG_LRU_GEN                                                     <<
5981 static void lru_gen_enter_fault(struct vm_area_struct *vma)               <<
5982 {                                                                         <<
5983         /* the LRU algorithm only applies to accesses with recency */     <<
5984         current->in_lru_fault = vma_has_recency(vma);                     <<
5985 }                                                                         <<
5986                                                                           <<
5987 static void lru_gen_exit_fault(void)                                      <<
5988 {                                                                         <<
5989         current->in_lru_fault = false;                                    <<
5990 }                                                                         <<
5991 #else                                                                     <<
5992 static void lru_gen_enter_fault(struct vm_area_struct *vma)               <<
5993 {                                                                         <<
5994 }                                                                         <<
5995                                                                           <<
5996 static void lru_gen_exit_fault(void)                                      <<
5997 {                                                                         <<
5998 }                                                                         <<
5999 #endif /* CONFIG_LRU_GEN */                                               <<
6000                                               << 
6001 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,        <<
6002                                        unsigned int *flags)               <<
6003 {                                                                          <<
6004         if (unlikely(*flags & FAULT_FLAG_UNSHARE)) {                       <<
6005                 if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE))               <<
6006                         return VM_FAULT_SIGSEGV;                           <<
6007                 /*                                                         <<
6008                  * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's  <<
6009                  * just treat it like an ordinary read fault otherwise.    <<
6010                  */                                                        <<
6011                 if (!is_cow_mapping(vma->vm_flags))                        <<
6012                         *flags &= ~FAULT_FLAG_UNSHARE;                     <<
6013         } else if (*flags & FAULT_FLAG_WRITE) {                            <<
6014                 /* Write faults on read-only mappings are impossible ... */ <<
6015                 if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE)))          <<
6016                         return VM_FAULT_SIGSEGV;                           <<
6017                 /* ... and FOLL_FORCE only applies to COW mappings. */     <<
6018                 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) &&            <<
6019                                  !is_cow_mapping(vma->vm_flags)))          <<
6020                         return VM_FAULT_SIGSEGV;                           <<
6021         }                                                                  <<
6022 #ifdef CONFIG_PER_VMA_LOCK                                                 <<
6023         /*                                                                 <<
6024          * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of <<
6025          * the assumption that lock is dropped on VM_FAULT_RETRY.          <<
6026          */                                                                <<
6027         if (WARN_ON_ONCE((*flags &                                         <<
6028                         (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) == <<
6029                         (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)))  <<
6030                 return VM_FAULT_SIGSEGV;                                   <<
6031 #endif                                                                     <<
6032                                                                            <<
6033         return 0;                                                          <<
6034 }                                                                          <<
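
The checks above hinge on is_cow_mapping(); for reference, it reduces to a single flag test (a sketch matching its definition in include/linux/mm.h, to the best of my reading):

static inline bool is_cow_mapping(vm_flags_t flags)
{
	/* Private (non-shared) but allowed to become writable: COW. */
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
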
6035                                               << 
6036 /*                                               3872 /*
6037  * By the time we get here, we already hold t    3873  * By the time we get here, we already hold the mm semaphore
6038  *                                               3874  *
6039  * The mmap_lock may have been released depending on flags and our        !! 3875  * The mmap_sem may have been released depending on flags and our
6040  * return value.  See filemap_fault() and __folio_lock_or_retry().        !! 3876  * return value.  See filemap_fault() and __lock_page_or_retry().
6041  */                                              3877  */
6042 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, !! 3878 int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
6043                            unsigned int flags, struct pt_regs *regs)      !! 3879                 unsigned int flags)
6044 {                                                                             3880 {
6045         /* If the fault handler drops the mmap_lock, vma may be freed */   !! 3881         int ret;
6046         struct mm_struct *mm = vma->vm_mm;    << 
6047         vm_fault_t ret;                       << 
6048         bool is_droppable;                    << 
6049                                                  3882 
6050         __set_current_state(TASK_RUNNING);       3883         __set_current_state(TASK_RUNNING);
6051                                                  3884 
6052         ret = sanitize_fault_flags(vma, &flags);                          !! 3885         count_vm_event(PGFAULT);
6053         if (ret)                                                          !! 3886         count_memcg_event_mm(vma->vm_mm, PGFAULT);
6054                 goto out;                                                 !! 3887 
                                                   >> 3888         /* do counter updates before entering really critical section. */
                                                   >> 3889         check_sync_rss_stat(current);
6055                                                  3890 
6056         if (!arch_vma_access_permitted(vma, f    3891         if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
6057                                             f    3892                                             flags & FAULT_FLAG_INSTRUCTION,
6058                                             flags & FAULT_FLAG_REMOTE)) { !! 3893                                             flags & FAULT_FLAG_REMOTE))
6059                 ret = VM_FAULT_SIGSEGV;       !! 3894                 return VM_FAULT_SIGSEGV;
6060                 goto out;                     << 
6061         }                                     << 
6062                                               << 
6063         is_droppable = !!(vma->vm_flags & VM_DROPPABLE);                  <<
6064                                                  3895 
6065         /*                                       3896         /*
6066          * Enable the memcg OOM handling for     3897          * Enable the memcg OOM handling for faults triggered in user
6067          * space.  Kernel faults are handled     3898          * space.  Kernel faults are handled more gracefully.
6068          */                                      3899          */
6069         if (flags & FAULT_FLAG_USER)             3900         if (flags & FAULT_FLAG_USER)
6070                 mem_cgroup_enter_user_fault();                            !! 3901                 mem_cgroup_oom_enable();
6071                                               << 
6072         lru_gen_enter_fault(vma);             << 
6073                                                  3902 
6074         if (unlikely(is_vm_hugetlb_page(vma))    3903         if (unlikely(is_vm_hugetlb_page(vma)))
6075                 ret = hugetlb_fault(vma->vm_m    3904                 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
6076         else                                     3905         else
6077                 ret = __handle_mm_fault(vma,     3906                 ret = __handle_mm_fault(vma, address, flags);
6078                                                  3907 
6079         /*                                    << 
6080          * Warning: It is no longer safe to dereference vma-> after this point, <<
6081          * because mmap_lock might have been dropped by __handle_mm_fault(), so <<
6082          * vma might be destroyed from underneath us.                     <<
6083          */                                   << 
6084                                               << 
6085         lru_gen_exit_fault();                 << 
6086                                               << 
6087         /* If the mapping is droppable, then errors due to OOM aren't fatal. */ <<
6088         if (is_droppable)                     << 
6089                 ret &= ~VM_FAULT_OOM;         << 
6090                                               << 
6091         if (flags & FAULT_FLAG_USER) {           3908         if (flags & FAULT_FLAG_USER) {
6092                 mem_cgroup_exit_user_fault(); !! 3909                 mem_cgroup_oom_disable();
6093                 /*                               3910                 /*
6094                  * The task may have entered     3911                  * The task may have entered a memcg OOM situation but
6095                  * if the allocation error wa    3912                  * if the allocation error was handled gracefully (no
6096                  * VM_FAULT_OOM), there is no    3913                  * VM_FAULT_OOM), there is no need to kill anything.
6097                  * Just clean up the OOM stat    3914                  * Just clean up the OOM state peacefully.
6098                  */                              3915                  */
6099                 if (task_in_memcg_oom(current    3916                 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
6100                         mem_cgroup_oom_synchr    3917                         mem_cgroup_oom_synchronize(false);
6101         }                                        3918         }
6102 out:                                          << 
6103         mm_account_fault(mm, regs, address, flags, ret);                  <<
6104                                                  3919 
6105         return ret;                              3920         return ret;
6106 }                                                3921 }
6107 EXPORT_SYMBOL_GPL(handle_mm_fault);              3922 EXPORT_SYMBOL_GPL(handle_mm_fault);
6108                                                  3923 
6109 #ifdef CONFIG_LOCK_MM_AND_FIND_VMA            << 
6110 #include <linux/extable.h>                    << 
6111                                               << 
6112 static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs) <<
6113 {                                             << 
6114         if (likely(mmap_read_trylock(mm)))    << 
6115                 return true;                  << 
6116                                               << 
6117         if (regs && !user_mode(regs)) {       << 
6118                 unsigned long ip = exception_ip(regs);                    <<
6119                 if (!search_exception_tables(ip))                         <<
6120                         return false;         << 
6121         }                                     << 
6122                                               << 
6123         return !mmap_read_lock_killable(mm);  << 
6124 }                                             << 
6125                                               << 
6126 static inline bool mmap_upgrade_trylock(struct mm_struct *mm)             <<
6127 {                                             << 
6128         /*                                    << 
6129          * We don't have this operation yet.  << 
6130          *                                    << 
6131          * It should be easy enough to do: it's basically a               <<
6132          *    atomic_long_try_cmpxchg_acquire()                           <<
6133          * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but             <<
6134          * it also needs the proper lockdep magic etc.                    <<
6135          */                                   << 
6136         return false;                         << 
6137 }                                             << 
6138                                               << 
6139 static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs) <<
6140 {                                             << 
6141         mmap_read_unlock(mm);                 << 
6142         if (regs && !user_mode(regs)) {       << 
6143                 unsigned long ip = exception_ip(regs);                    <<
6144                 if (!search_exception_tables(ip))                         <<
6145                         return false;         << 
6146         }                                     << 
6147         return !mmap_write_lock_killable(mm); << 
6148 }                                             << 
6149                                               << 
6150 /*                                            << 
6151  * Helper for page fault handling.            << 
6152  *                                            << 
6153  * This is kind of equivalent to "mmap_read_lock()" followed              <<
6154  * by "find_extend_vma()", except it's a lot more careful about           <<
6155  * the locking (and will drop the lock on failure).                       <<
6156  *                                                                        <<
6157  * For example, if we have a kernel bug that causes a page                <<
6158  * fault, we don't want to just use mmap_read_lock() to get               <<
6159  * the mm lock, because that would deadlock if the bug were               <<
6160  * to happen while we're holding the mm lock for writing.                 <<
6161  *                                                                        <<
6162  * So this checks the exception tables on kernel faults in                <<
6163  * order to do all this only for instructions that are actually           <<
6164  * expected to fault.                         << 
6165  *                                            << 
6166  * We can also actually take the mm lock for writing if we                <<
6167  * need to extend the vma, which helps the VM layer a lot.                <<
6168  */                                           << 
6169 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,         <<
6170                         unsigned long addr, struct pt_regs *regs)         <<
6171 {                                             << 
6172         struct vm_area_struct *vma;           << 
6173                                               << 
6174         if (!get_mmap_lock_carefully(mm, regs))                           <<
6175                 return NULL;                  << 
6176                                               << 
6177         vma = find_vma(mm, addr);             << 
6178         if (likely(vma && (vma->vm_start <= addr)))                       <<
6179                 return vma;                   << 
6180                                               << 
6181         /*                                    << 
6182          * Well, dang. We might still be successful, but only             <<
6183          * if we can extend a vma to do so.   << 
6184          */                                   << 
6185         if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {                    <<
6186                 mmap_read_unlock(mm);         << 
6187                 return NULL;                  << 
6188         }                                     << 
6189                                               << 
6190         /*                                    << 
6191          * We can try to upgrade the mmap lock atomically,                <<
6192          * in which case we can continue to use the vma                   <<
6193          * we already looked up.              << 
6194          *                                    << 
6195          * Otherwise we'll have to drop the mmap lock and                 <<
6196          * re-take it, and also look up the vma again,                    <<
6197          * re-checking it.                    << 
6198          */                                   << 
6199         if (!mmap_upgrade_trylock(mm)) {      << 
6200                 if (!upgrade_mmap_lock_carefully(mm, regs))               <<
6201                         return NULL;          << 
6202                                               << 
6203                 vma = find_vma(mm, addr);     << 
6204                 if (!vma)                     << 
6205                         goto fail;            << 
6206                 if (vma->vm_start <= addr)    << 
6207                         goto success;         << 
6208                 if (!(vma->vm_flags & VM_GROWSDOWN))                      <<
6209                         goto fail;            << 
6210         }                                     << 
6211                                               << 
6212         if (expand_stack_locked(vma, addr))   << 
6213                 goto fail;                    << 
6214                                               << 
6215 success:                                      << 
6216         mmap_write_downgrade(mm);             << 
6217         return vma;                           << 
6218                                               << 
6219 fail:                                         << 
6220         mmap_write_unlock(mm);                << 
6221         return NULL;                          << 
6222 }                                             << 
6223 #endif                                        << 
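
Putting lock_mm_and_find_vma() and handle_mm_fault() together, an architecture's fault handler typically follows a retry protocol like the sketch below (heavily simplified; signal delivery, VM_FAULT_COMPLETED and error paths are elided, and the ex_ name is hypothetical):

static void ex_do_page_fault(struct pt_regs *regs, struct mm_struct *mm,
			     unsigned long address, unsigned int flags)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		return;		/* a real handler raises SIGSEGV here */

	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault & VM_FAULT_RETRY) {
		/* The mmap lock was dropped for us: fault once more. */
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}
	mmap_read_unlock(mm);
}
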
6224                                               << 
6225 #ifdef CONFIG_PER_VMA_LOCK                    << 
6226 /*                                            << 
6227  * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be <<
6228  * stable and not isolated. If the VMA is not found or is being modified the <<
6229  * function returns NULL.                     << 
6230  */                                           << 
6231 struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,           <<
6232                                           unsigned long address)          <<
6233 {                                             << 
6234         MA_STATE(mas, &mm->mm_mt, address, address);                      <<
6235         struct vm_area_struct *vma;           << 
6236                                               << 
6237         rcu_read_lock();                      << 
6238 retry:                                        << 
6239         vma = mas_walk(&mas);                 << 
6240         if (!vma)                             << 
6241                 goto inval;                   << 
6242                                               << 
6243         if (!vma_start_read(vma))             << 
6244                 goto inval;                   << 
6245                                               << 
6246         /* Check if the VMA got isolated after we found it */             <<
6247         if (vma->detached) {                  << 
6248                 vma_end_read(vma);            << 
6249                 count_vm_vma_lock_event(VMA_LOCK_MISS);                   <<
6250                 /* The area was replaced with another one */              <<
6251                 goto retry;                   << 
6252         }                                     << 
6253         /*                                    << 
6254          * At this point, we have a stable reference to a VMA: The VMA is <<
6255          * locked and we know it hasn't already been isolated.            <<
6256          * From here on, we can access the VMA without worrying about which <<
6257          * fields are accessible for RCU readers.                         <<
6258          */                                   << 
6259                                               << 
6260         /* Check since vm_start/vm_end might change before we lock the VMA */ <<
6261         if (unlikely(address < vma->vm_start || address >= vma->vm_end))  <<
6262                 goto inval_end_read;          << 
6263                                               << 
6264         rcu_read_unlock();                    << 
6265         return vma;                           << 
6266                                               << 
6267 inval_end_read:                               << 
6268         vma_end_read(vma);                    << 
6269 inval:                                        << 
6270         rcu_read_unlock();                    << 
6271         count_vm_vma_lock_event(VMA_LOCK_ABORT);                          <<
6272         return NULL;                          << 
6273 }                                             << 
6274 #endif /* CONFIG_PER_VMA_LOCK */              << 
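
lock_vma_under_rcu() is what lets an arch fault handler attempt a fault without taking the mmap_lock at all; a simplified sketch of that fast path (modeled on the common arch pattern; the ex_ name is hypothetical):

static vm_fault_t ex_try_vma_locked_fault(struct mm_struct *mm,
					  unsigned long address,
					  unsigned int flags,
					  struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		return VM_FAULT_RETRY;	/* fall back to the mmap_lock path */

	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);	/* otherwise the lock was dropped for us */
	return fault;
}
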
6275                                               << 
6276 #ifndef __PAGETABLE_P4D_FOLDED                   3924 #ifndef __PAGETABLE_P4D_FOLDED
6277 /*                                               3925 /*
6278  * Allocate p4d page table.                      3926  * Allocate p4d page table.
6279  * We've already handled the fast-path in-lin    3927  * We've already handled the fast-path in-line.
6280  */                                              3928  */
6281 int __p4d_alloc(struct mm_struct *mm, pgd_t *    3929 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
6282 {                                                3930 {
6283         p4d_t *new = p4d_alloc_one(mm, addres    3931         p4d_t *new = p4d_alloc_one(mm, address);
6284         if (!new)                                3932         if (!new)
6285                 return -ENOMEM;                  3933                 return -ENOMEM;
6286                                                  3934 
                                                   >> 3935         smp_wmb(); /* See comment in __pte_alloc */
                                                   >> 3936 
6287         spin_lock(&mm->page_table_lock);         3937         spin_lock(&mm->page_table_lock);
6288         if (pgd_present(*pgd)) {        /* Another has populated it */    !! 3938         if (pgd_present(*pgd))          /* Another has populated it */
6289                 p4d_free(mm, new);               3939                 p4d_free(mm, new);
6290         } else {                              !! 3940         else
6291                 smp_wmb(); /* See comment in pmd_install() */             <<
6292                 pgd_populate(mm, pgd, new);      3941                 pgd_populate(mm, pgd, new);
6293         }                                     << 
6294         spin_unlock(&mm->page_table_lock);       3942         spin_unlock(&mm->page_table_lock);
6295         return 0;                                3943         return 0;
6296 }                                                3944 }
6297 #endif /* __PAGETABLE_P4D_FOLDED */              3945 #endif /* __PAGETABLE_P4D_FOLDED */
6298                                                  3946 
6299 #ifndef __PAGETABLE_PUD_FOLDED                   3947 #ifndef __PAGETABLE_PUD_FOLDED
6300 /*                                               3948 /*
6301  * Allocate page upper directory.                3949  * Allocate page upper directory.
6302  * We've already handled the fast-path in-lin    3950  * We've already handled the fast-path in-line.
6303  */                                              3951  */
6304 int __pud_alloc(struct mm_struct *mm, p4d_t *    3952 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
6305 {                                                3953 {
6306         pud_t *new = pud_alloc_one(mm, addres    3954         pud_t *new = pud_alloc_one(mm, address);
6307         if (!new)                                3955         if (!new)
6308                 return -ENOMEM;                  3956                 return -ENOMEM;
6309                                                  3957 
                                                   >> 3958         smp_wmb(); /* See comment in __pte_alloc */
                                                   >> 3959 
6310         spin_lock(&mm->page_table_lock);         3960         spin_lock(&mm->page_table_lock);
6311         if (!p4d_present(*p4d)) {             !! 3961 #ifndef __ARCH_HAS_5LEVEL_HACK
6312                 mm_inc_nr_puds(mm);           !! 3962         if (p4d_present(*p4d))          /* Another has populated it */
6313                 smp_wmb(); /* See comment in pmd_install() */             !! 3963                 pud_free(mm, new);
                                                   >> 3964         else
6314                 p4d_populate(mm, p4d, new);      3965                 p4d_populate(mm, p4d, new);
6315         } else  /* Another has populated it */                            !! 3966 #else
                                                   >> 3967         if (pgd_present(*p4d))          /* Another has populated it */
6316                 pud_free(mm, new);               3968                 pud_free(mm, new);
                                                   >> 3969         else
                                                   >> 3970                 pgd_populate(mm, p4d, new);
                                                   >> 3971 #endif /* __ARCH_HAS_5LEVEL_HACK */
6317         spin_unlock(&mm->page_table_lock);       3972         spin_unlock(&mm->page_table_lock);
6318         return 0;                                3973         return 0;
6319 }                                                3974 }
6320 #endif /* __PAGETABLE_PUD_FOLDED */              3975 #endif /* __PAGETABLE_PUD_FOLDED */
6321                                                  3976 
6322 #ifndef __PAGETABLE_PMD_FOLDED                   3977 #ifndef __PAGETABLE_PMD_FOLDED
6323 /*                                               3978 /*
6324  * Allocate page middle directory.               3979  * Allocate page middle directory.
6325  * We've already handled the fast-path in-lin    3980  * We've already handled the fast-path in-line.
6326  */                                              3981  */
6327 int __pmd_alloc(struct mm_struct *mm, pud_t *    3982 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
6328 {                                                3983 {
6329         spinlock_t *ptl;                         3984         spinlock_t *ptl;
6330         pmd_t *new = pmd_alloc_one(mm, addres    3985         pmd_t *new = pmd_alloc_one(mm, address);
6331         if (!new)                                3986         if (!new)
6332                 return -ENOMEM;                  3987                 return -ENOMEM;
6333                                                  3988 
                                                   >> 3989         smp_wmb(); /* See comment in __pte_alloc */
                                                   >> 3990 
6334         ptl = pud_lock(mm, pud);                 3991         ptl = pud_lock(mm, pud);
                                                   >> 3992 #ifndef __ARCH_HAS_4LEVEL_HACK
6335         if (!pud_present(*pud)) {                3993         if (!pud_present(*pud)) {
6336                 mm_inc_nr_pmds(mm);              3994                 mm_inc_nr_pmds(mm);
6337                 smp_wmb(); /* See comment in pmd_install() */             <<
6338                 pud_populate(mm, pud, new);      3995                 pud_populate(mm, pud, new);
6339         } else {        /* Another has populated it */                    !! 3996         } else  /* Another has populated it */
6340                 pmd_free(mm, new);               3997                 pmd_free(mm, new);
6341         }                                     !! 3998 #else
                                                   >> 3999         if (!pgd_present(*pud)) {
                                                   >> 4000                 mm_inc_nr_pmds(mm);
                                                   >> 4001                 pgd_populate(mm, pud, new);
                                                   >> 4002         } else /* Another has populated it */
                                                   >> 4003                 pmd_free(mm, new);
                                                   >> 4004 #endif /* __ARCH_HAS_4LEVEL_HACK */
6342         spin_unlock(ptl);                        4005         spin_unlock(ptl);
6343         return 0;                                4006         return 0;
6344 }                                                4007 }
6345 #endif /* __PAGETABLE_PMD_FOLDED */              4008 #endif /* __PAGETABLE_PMD_FOLDED */
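
All three allocators above share the same shape: allocate the new table outside the lock, then check-and-populate under page_table_lock, freeing the loser's allocation if another thread raced ahead. A self-contained userspace analogue of the idiom (illustrative only; not kernel code):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *table;

int ex_table_alloc(size_t size)
{
	void *new = calloc(1, size);	/* allocate outside the lock */

	if (!new)
		return -1;
	pthread_mutex_lock(&table_lock);
	if (table)
		free(new);		/* another thread has populated it */
	else
		table = new;		/* publish the initialized table */
	pthread_mutex_unlock(&table_lock);
	return 0;
}
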
6346                                                  4009 
6347 static inline void pfnmap_args_setup(struct follow_pfnmap_args *args,     !! 4010 static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
6348                                      spinlock_t *lock, pte_t *ptep,       !! 4011                             unsigned long *start, unsigned long *end,
6349                                      pgprot_t pgprot, unsigned long pfn_base, !! 4012                             pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
6350                                      unsigned long addr_mask, bool writable, <<
6351                                      bool special)                        <<
6352 {                                             << 
6353         args->lock = lock;                    << 
6354         args->ptep = ptep;                    << 
6355         args->pfn = pfn_base + ((args->address & ~addr_mask) >> PAGE_SHIFT); <<
6356         args->pgprot = pgprot;                << 
6357         args->writable = writable;            << 
6358         args->special = special;              << 
6359 }                                             << 
6360                                               << 
6361 static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma)      <<
6362 {                                             << 
6363 #ifdef CONFIG_LOCKDEP                         << 
6364         struct file *file = vma->vm_file;     << 
6365         struct address_space *mapping = file  << 
6366                                               << 
6367         if (mapping)                          << 
6368                 lockdep_assert(lockdep_is_held(&mapping->i_mmap_rwsem) || <<
6369                                lockdep_is_held(&vma->vm_mm->mmap_lock));  <<
6370         else                                                              <<
6371                 lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock));  <<
6372 #endif                                        << 
6373 }                                             << 
6374                                               << 
6375 /**                                           << 
6376  * follow_pfnmap_start() - Look up a pfn mapping at a user virtual address <<
6377  * @args: Pointer to struct @follow_pfnmap_args                           <<
6378  *                                                                        <<
6379  * The caller needs to setup args->vma and args->address to point to the  <<
6380  * virtual address as the target of such lookup.  On a successful return, <<
6381  * the results will be put into other output fields.                      <<
6382  *                                                                        <<
6383  * After the caller finished using the fields, the caller must invoke     <<
6384  * another follow_pfnmap_end() to properly release the locks and resources <<
6385  * of such a lookup request.                                              <<
6386  *                                                                        <<
6387  * During the start() and end() calls, the results in @args will be valid <<
6388  * as proper locks will be held.  After the end() is invoked, all the     <<
6389  * fields in @follow_pfnmap_args will be invalid to be further accessed.  <<
6390  * Further use of such information after end() may require proper        <<
6391  * synchronizations by the caller with page table updates, otherwise it   <<
6392  * can create a security bug.                                             <<
6393  *                                                                        <<
6394  * If the PTE maps a refcounted page, callers are responsible for protecting <<
6395  * against invalidation with MMU notifiers; otherwise access to the PFN at <<
6396  * a later point in time can trigger use-after-free.                      <<
6397  *                                                                        <<
6398  * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore <<
6399  * should be taken for read, and the mmap semaphore cannot be released    <<
6400  * before the end() is invoked.                                           <<
6401  *                                                                        <<
6402  * This function must not be used to modify PTE content.                  <<
6403  *                                                                        <<
6404  * Return: zero on success, negative otherwise.                           <<
6405  */                                           << 
6406 int follow_pfnmap_start(struct follow_pfnmap_args *args)                  <<
6407 {                                                4013 {
6408         struct vm_area_struct *vma = args->vma;                           !! 4014         pgd_t *pgd;
6409         unsigned long address = args->address;                            !! 4015         p4d_t *p4d;
6410         struct mm_struct *mm = vma->vm_mm;    !! 4016         pud_t *pud;
6411         spinlock_t *lock;                     !! 4017         pmd_t *pmd;
6412         pgd_t *pgdp;                          !! 4018         pte_t *ptep;
6413         p4d_t *p4dp, p4d;                     << 
6414         pud_t *pudp, pud;                     << 
6415         pmd_t *pmdp, pmd;                     << 
6416         pte_t *ptep, pte;                     << 
6417                                               << 
6418         pfnmap_lockdep_assert(vma);           << 
6419                                                  4019 
6420         if (unlikely(address < vma->vm_start || address >= vma->vm_end))  !! 4020         pgd = pgd_offset(mm, address);
                                                   >> 4021         if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
6421                 goto out;                        4022                 goto out;
6422                                                  4023 
6423         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))                       !! 4024         p4d = p4d_offset(pgd, address);
6424                 goto out;                     !! 4025         if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
6425 retry:                                        << 
6426         pgdp = pgd_offset(mm, address);       << 
6427         if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))                  <<
6428                 goto out;                        4026                 goto out;
6429                                                  4027 
6430         p4dp = p4d_offset(pgdp, address);     !! 4028         pud = pud_offset(p4d, address);
6431         p4d = READ_ONCE(*p4dp);               !! 4029         if (pud_none(*pud) || unlikely(pud_bad(*pud)))
6432         if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))                      <<
6433                 goto out;                        4030                 goto out;
6434                                                  4031 
6435         pudp = pud_offset(p4dp, address);     !! 4032         pmd = pmd_offset(pud, address);
6436         pud = READ_ONCE(*pudp);               !! 4033         VM_BUG_ON(pmd_trans_huge(*pmd));
6437         if (pud_none(pud))                    !! 4034 
6438                 goto out;                     !! 4035         if (pmd_huge(*pmd)) {
6439         if (pud_leaf(pud)) {                  !! 4036                 if (!pmdpp)
6440                 lock = pud_lock(mm, pudp);    !! 4037                         goto out;
6441                 if (!unlikely(pud_leaf(pud))) {                           <<
6442                         spin_unlock(lock);    << 
6443                         goto retry;           << 
6444                 }                             << 
6445                 pfnmap_args_setup(args, lock, NULL, pud_pgprot(pud),      <<
6446                                   pud_pfn(pud), PUD_MASK, pud_write(pud), <<
6447                                   pud_special(pud));                      <<
6448                 return 0;                     << 
6449         }                                     << 
6450                                                  4038 
6451         pmdp = pmd_offset(pudp, address);     !! 4039                 if (start && end) {
6452         pmd = pmdp_get_lockless(pmdp);        !! 4040                         *start = address & PMD_MASK;
6453         if (pmd_leaf(pmd)) {                  !! 4041                         *end = *start + PMD_SIZE;
6454                 lock = pmd_lock(mm, pmdp);    !! 4042                         mmu_notifier_invalidate_range_start(mm, *start, *end);
6455                 if (!unlikely(pmd_leaf(pmd))) {                           !! 4043                 }
6456                         spin_unlock(lock);    !! 4044                 *ptlp = pmd_lock(mm, pmd);
6457                         goto retry;           !! 4045                 if (pmd_huge(*pmd)) {
                                                   >> 4046                         *pmdpp = pmd;
                                                   >> 4047                         return 0;
6458                 }                                4048                 }
6459                 pfnmap_args_setup(args, lock, NULL, pmd_pgprot(pmd),      !! 4049                 spin_unlock(*ptlp);
6460                                   pmd_pfn(pmd), PMD_MASK, pmd_write(pmd), !! 4050                 if (start && end)
6461                                   pmd_special(pmd));                      !! 4051                         mmu_notifier_invalidate_range_end(mm, *start, *end);
6462                 return 0;                     << 
6463         }                                        4052         }
6464                                                  4053 
6465         ptep = pte_offset_map_lock(mm, pmdp, address, &lock);             !! 4054         if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
6466         if (!ptep)                            << 
6467                 goto out;                        4055                 goto out;
6468         pte = ptep_get(ptep);                 !! 4056 
6469         if (!pte_present(pte))                !! 4057         if (start && end) {
                                                   >> 4058                 *start = address & PAGE_MASK;
                                                   >> 4059                 *end = *start + PAGE_SIZE;
                                                   >> 4060                 mmu_notifier_invalidate_range_start(mm, *start, *end);
                                                   >> 4061         }
                                                   >> 4062         ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
                                                   >> 4063         if (!pte_present(*ptep))
6470                 goto unlock;                     4064                 goto unlock;
6471         pfnmap_args_setup(args, lock, ptep, pte_pgprot(pte),              !! 4065         *ptepp = ptep;
6472                           pte_pfn(pte), PAGE_MASK, pte_write(pte),        <<
6473                           pte_special(pte));  << 
6474         return 0;                                4066         return 0;
6475 unlock:                                          4067 unlock:
6476         pte_unmap_unlock(ptep, lock);         !! 4068         pte_unmap_unlock(ptep, *ptlp);
                                                   >> 4069         if (start && end)
                                                   >> 4070                 mmu_notifier_invalidate_range_end(mm, *start, *end);
6477 out:                                             4071 out:
6478         return -EINVAL;                          4072         return -EINVAL;
6479 }                                                4073 }
6480 EXPORT_SYMBOL_GPL(follow_pfnmap_start);       !! 4074 
                                                   >> 4075 static inline int follow_pte(struct mm_struct *mm, unsigned long address,
                                                   >> 4076                              pte_t **ptepp, spinlock_t **ptlp)
                                                   >> 4077 {
                                                   >> 4078         int res;
                                                   >> 4079 
                                                   >> 4080         /* (void) is needed to make gcc happy */
                                                   >> 4081         (void) __cond_lock(*ptlp,
                                                   >> 4082                            !(res = __follow_pte_pmd(mm, address, NULL, NULL,
                                                   >> 4083                                                     ptepp, NULL, ptlp)));
                                                   >> 4084         return res;
                                                   >> 4085 }
                                                   >> 4086 
                                                   >> 4087 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
                                                   >> 4088                              unsigned long *start, unsigned long *end,
                                                   >> 4089                              pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
                                                   >> 4090 {
                                                   >> 4091         int res;
                                                   >> 4092 
                                                   >> 4093         /* (void) is needed to make gcc happy */
                                                   >> 4094         (void) __cond_lock(*ptlp,
                                                   >> 4095                            !(res = __follow_pte_pmd(mm, address, start, end,
                                                   >> 4096                                                     ptepp, pmdpp, ptlp)));
                                                   >> 4097         return res;
                                                   >> 4098 }
                                                   >> 4099 EXPORT_SYMBOL(follow_pte_pmd);
6481                                                  4100 
6482 /**                                              4101 /**
6483  * follow_pfnmap_end(): End a follow_pfnmap_start() process               !! 4103  * follow_pfn - look up PFN at a user virtual address
6484  * @args: Pointer to struct @follow_pfnmap_args                           !! 4104  * @vma: memory mapping
                                                   >> 4104  * @address: user virtual address
                                                   >> 4105  * @pfn: location to store found PFN
                                                   >> 4106  *
                                                   >> 4107  * Only IO mappings and raw PFN mappings are allowed.
6485  *                                               4108  *
6486  * Must be used in pair with follow_pfnmap_start().  See the start() function !! 4109  * Returns zero and the pfn at @pfn on success, -ve otherwise.
6487  * above for more information.                << 
6488  */                                              4110  */
6489 void follow_pfnmap_end(struct follow_pfnmap_a !! 4111 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
                                                   >> 4112         unsigned long *pfn)
6490 {                                                4113 {
6491         if (args->lock)                       !! 4114         int ret = -EINVAL;
6492                 spin_unlock(args->lock);      !! 4115         spinlock_t *ptl;
6493         if (args->ptep)                       !! 4116         pte_t *ptep;
6494                 pte_unmap(args->ptep);        !! 4117 
                                                   >> 4118         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
                                                   >> 4119                 return ret;
                                                   >> 4120 
                                                   >> 4121         ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
                                                   >> 4122         if (ret)
                                                   >> 4123                 return ret;
                                                   >> 4124         *pfn = pte_pfn(*ptep);
                                                   >> 4125         pte_unmap_unlock(ptep, ptl);
                                                   >> 4126         return 0;
6495 }                                                4127 }
6496 EXPORT_SYMBOL_GPL(follow_pfnmap_end);         !! 4128 EXPORT_SYMBOL(follow_pfn);
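
A typical caller brackets its use of the results between the two calls, per the kerneldoc above; a minimal sketch (the ex_ name is hypothetical, and the caller is assumed to hold the mmap lock):

static int ex_lookup_pfn(struct vm_area_struct *vma, unsigned long addr,
			 unsigned long *pfn)
{
	struct follow_pfnmap_args args = { .vma = vma, .address = addr };

	if (follow_pfnmap_start(&args))
		return -EINVAL;
	*pfn = args.pfn;	/* only guaranteed stable until ..._end() */
	follow_pfnmap_end(&args);
	return 0;
}
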
6497                                                  4129 
6498 #ifdef CONFIG_HAVE_IOREMAP_PROT                  4130 #ifdef CONFIG_HAVE_IOREMAP_PROT
6499 /**                                           !! 4131 int follow_phys(struct vm_area_struct *vma,
6500  * generic_access_phys - generic implementation for iomem mmap access     !! 4132                 unsigned long address, unsigned int flags,
6501  * @vma: the vma to access                                                !! 4133                 unsigned long *prot, resource_size_t *phys)
6502  * @addr: userspace address, not relative offset within @vma              !! 4134 {
6503  * @buf: buffer to read/write                                             !! 4135         int ret = -EINVAL;
6504  * @len: length of transfer                                               !! 4136         pte_t *ptep, pte;
6505  * @write: set to FOLL_WRITE when writing, otherwise reading              !! 4137         spinlock_t *ptl;
6506  *                                                                        !! 4138 
6507  * This is a generic implementation for &vm_operations_struct.access for an !! 4139         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
6508  * iomem mapping. This callback is used by access_process_vm() when the @vma is !! 4140                 goto out;
6509  * not page based.                            !! 4141 
6510  */                                           !! 4142         if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
                                                   >> 4143                 goto out;
                                                   >> 4144         pte = *ptep;
                                                   >> 4145 
                                                   >> 4146         if ((flags & FOLL_WRITE) && !pte_write(pte))
                                                   >> 4147                 goto unlock;
                                                   >> 4148 
                                                   >> 4149         *prot = pgprot_val(pte_pgprot(pte));
                                                   >> 4150         *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
                                                   >> 4151 
                                                   >> 4152         ret = 0;
                                                   >> 4153 unlock:
                                                   >> 4154         pte_unmap_unlock(ptep, ptl);
                                                   >> 4155 out:
                                                   >> 4156         return ret;
                                                   >> 4157 }
                                                   >> 4158 
6511 int generic_access_phys(struct vm_area_struct    4159 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
6512                         void *buf, int len, i    4160                         void *buf, int len, int write)
6513 {                                                4161 {
6514         resource_size_t phys_addr;               4162         resource_size_t phys_addr;
6515         unsigned long prot = 0;                  4163         unsigned long prot = 0;
6516         void __iomem *maddr;                     4164         void __iomem *maddr;
6517         int offset = offset_in_page(addr);    !! 4165         int offset = addr & (PAGE_SIZE-1);
6518         int ret = -EINVAL;                    << 
6519         bool writable;                        << 
6520         struct follow_pfnmap_args args = { .vma = vma, .address = addr }; <<
6521                                               << 
6522 retry:                                        << 
6523         if (follow_pfnmap_start(&args))       << 
6524                 return -EINVAL;               << 
6525         prot = pgprot_val(args.pgprot);       << 
6526         phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT;              <<
6527         writable = args.writable;             << 
6528         follow_pfnmap_end(&args);             << 
6529                                                  4166 
6530         if ((write & FOLL_WRITE) && !writable)                            !! 4167         if (follow_phys(vma, addr, write, &prot, &phys_addr))
6531                 return -EINVAL;                  4168                 return -EINVAL;
6532                                                  4169 
6533         maddr = ioremap_prot(phys_addr, PAGE_    4170         maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
6534         if (!maddr)                           << 
6535                 return -ENOMEM;               << 
6536                                               << 
6537         if (follow_pfnmap_start(&args))       << 
6538                 goto out_unmap;               << 
6539                                               << 
6540         if ((prot != pgprot_val(args.pgprot)) << 
6541             (phys_addr != (args.pfn << PAGE_S << 
6542             (writable != args.writable)) {    << 
6543                 follow_pfnmap_end(&args);     << 
6544                 iounmap(maddr);               << 
6545                 goto retry;                   << 
6546         }                                     << 
6547                                               << 
6548         if (write)                               4171         if (write)
6549                 memcpy_toio(maddr + offset, b    4172                 memcpy_toio(maddr + offset, buf, len);
6550         else                                     4173         else
6551                 memcpy_fromio(buf, maddr + of    4174                 memcpy_fromio(buf, maddr + offset, len);
6552         ret = len;                            << 
6553         follow_pfnmap_end(&args);             << 
6554 out_unmap:                                    << 
6555         iounmap(maddr);                          4175         iounmap(maddr);
6556                                                  4176 
6557         return ret;                           !! 4177         return len;
6558 }                                                4178 }
6559 EXPORT_SYMBOL_GPL(generic_access_phys);          4179 EXPORT_SYMBOL_GPL(generic_access_phys);
6560 #endif                                           4180 #endif
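
/*
 * Example (standalone sketch, not part of this file): a driver that maps
 * device memory with remap_pfn_range() can plug generic_access_phys() in as
 * its ->access callback so that ptrace/gdb peeks via access_process_vm()
 * work on the mapping, as /dev/mem does in drivers/char/mem.c.  The my_*
 * names below are hypothetical.
 */
#include <linux/fs.h>
#include <linux/mm.h>

static const struct vm_operations_struct my_phys_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys,  /* exported above for exactly this */
#endif
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        vma->vm_ops = &my_phys_vm_ops;
        /* remap_pfn_range() marks the vma VM_IO | VM_PFNMAP */
        return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
                               vma->vm_page_prot);
}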

/*
 * Access another process' address space as given in mm.
 */
static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
                              void *buf, int len, unsigned int gup_flags)
{
        void *old_buf = buf;
        int write = gup_flags & FOLL_WRITE;

        if (mmap_read_lock_killable(mm))
                return 0;

        /* Untag the address before looking up the VMA */
        addr = untagged_addr_remote(mm, addr);

        /* Avoid triggering the temporary warning in __get_user_pages */
        if (!vma_lookup(mm, addr) && !expand_stack(mm, addr))
                return 0;

        /* ignore errors, just check how much was successfully transferred */
        while (len) {
                int bytes, offset;
                void *maddr;
                struct vm_area_struct *vma = NULL;
                struct page *page = get_user_page_vma_remote(mm, addr,
                                                             gup_flags, &vma);

                if (IS_ERR(page)) {
                        /* We might need to expand the stack to access it */
                        vma = vma_lookup(mm, addr);
                        if (!vma) {
                                vma = expand_stack(mm, addr);

                                /* mmap_lock was dropped on failure */
                                if (!vma)
                                        return buf - old_buf;

                                /* Try again if stack expansion worked */
                                continue;
                        }

                        /*
                         * Check if this is a VM_IO | VM_PFNMAP VMA, which
                         * we can access using slightly different code.
                         */
                        bytes = 0;
#ifdef CONFIG_HAVE_IOREMAP_PROT
                        if (vma->vm_ops && vma->vm_ops->access)
                                bytes = vma->vm_ops->access(vma, addr, buf,
                                                            len, write);
#endif
                        if (bytes <= 0)
                                break;
                } else {
                        bytes = len;
                        offset = addr & (PAGE_SIZE-1);
                        if (bytes > PAGE_SIZE-offset)
                                bytes = PAGE_SIZE-offset;

                        maddr = kmap_local_page(page);
                        if (write) {
                                copy_to_user_page(vma, page, addr,
                                                  maddr + offset, buf, bytes);
                                set_page_dirty_lock(page);
                        } else {
                                copy_from_user_page(vma, page, addr,
                                                    buf, maddr + offset, bytes);
                        }
                        unmap_and_put_page(page, maddr);
                }
                len -= bytes;
                buf += bytes;
                addr += bytes;
        }
        mmap_read_unlock(mm);

        return buf - old_buf;
}

/**
 * access_remote_vm - access another process' address space
 * @mm:         the mm_struct of the target address space
 * @addr:       start address to access
 * @buf:        source or destination buffer
 * @len:        number of bytes to transfer
 * @gup_flags:  flags modifying lookup behaviour
 *
 * The caller must hold a reference on @mm.
 *
 * Return: number of bytes copied from source to destination.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
                void *buf, int len, unsigned int gup_flags)
{
        return __access_remote_vm(mm, addr, buf, len, gup_flags);
}
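
/*
 * Example (standalone userspace sketch): reads and writes of
 * /proc/<pid>/mem are serviced by access_remote_vm() (mem_rw() in
 * fs/proc/base.c), so its partial-transfer semantics are visible from
 * userspace: pread() returns however many bytes were reachable.  Error
 * handling is trimmed; attaching first satisfies the ptrace access check.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

int main(int argc, char **argv)
{
        pid_t pid = (pid_t)atol(argv[1]);
        unsigned long addr = strtoul(argv[2], NULL, 0);
        char path[64], buf[64];
        ssize_t n;
        int fd;

        ptrace(PTRACE_ATTACH, pid, NULL, NULL);
        waitpid(pid, NULL, 0);

        snprintf(path, sizeof(path), "/proc/%d/mem", (int)pid);
        fd = open(path, O_RDONLY);
        n = pread(fd, buf, sizeof(buf), addr);  /* -> access_remote_vm() */
        printf("read %zd bytes\n", n);

        close(fd);
        ptrace(PTRACE_DETACH, pid, NULL, NULL);
        return 0;
}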

/*
 * Access another process' address space.
 * Source/target buffer must be kernel space.
 * Do not walk the page tables directly; use get_user_pages().
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr,
                void *buf, int len, unsigned int gup_flags)
{
        struct mm_struct *mm;
        int ret;

        mm = get_task_mm(tsk);
        if (!mm)
                return 0;

        ret = __access_remote_vm(mm, addr, buf, len, gup_flags);

        mmput(mm);

        return ret;
}
EXPORT_SYMBOL_GPL(access_process_vm);
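
/*
 * Example (sketch): an in-kernel user reading another task's memory the way
 * a debugger would.  peek_task_word() is hypothetical; FOLL_FORCE mirrors
 * ptrace, which must be able to read text pages mapped r-x -- drop it if
 * normal access permissions should apply.
 */
static int peek_task_word(struct task_struct *tsk, unsigned long addr,
                          unsigned long *val)
{
        /* access_process_vm() takes its own reference on tsk's mm */
        int copied = access_process_vm(tsk, addr, val, sizeof(*val),
                                       FOLL_FORCE);

        return copied == sizeof(*val) ? 0 : -EIO;
}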

/*
 * Print the name of a VMA.
 */
void print_vma_addr(char *prefix, unsigned long ip)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        /*
         * we might be running from an atomic context so we cannot sleep:
         */
        if (!mmap_read_trylock(mm))
                return;

        vma = vma_lookup(mm, ip);
        if (vma && vma->vm_file) {
                struct file *f = vma->vm_file;
                ip -= vma->vm_start;
                ip += vma->vm_pgoff << PAGE_SHIFT;
                printk("%s%pD[%lx,%lx+%lx]", prefix, f, ip,
                                vma->vm_start,
                                vma->vm_end - vma->vm_start);
        }
        mmap_read_unlock(mm);
}
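
/*
 * Example (sketch): arch fault handlers call this when logging a user-space
 * crash, e.g. x86's show_signal_msg() does
 *
 *      print_vma_addr(KERN_CONT " in ", regs->ip);
 *
 * which appends something like " in app[1234,55e32f400000+2000]": the file
 * name, the faulting IP's offset within that file, then the mapping's start
 * address and length.
 */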

#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void __might_fault(const char *file, int line)
{
        if (pagefault_disabled())
                return;
        __might_sleep(file, line);
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
        if (current->mm)
                might_lock_read(&current->mm->mmap_lock);
#endif
}
EXPORT_SYMBOL(__might_fault);
#endif
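
/*
 * Example (sketch): the uaccess helpers call might_fault() before touching
 * user memory, so a copy_from_user() issued in atomic context is caught by
 * __might_fault() under CONFIG_DEBUG_ATOMIC_SLEEP even if the copy never
 * actually faults:
 *
 *      spin_lock(&lock);
 *      ret = copy_from_user(dst, ubuf, len);   <-- splat: might sleep
 *      spin_unlock(&lock);
 */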

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
/*
 * Process all subpages of the specified huge page with the specified
 * operation.  The target subpage will be processed last to keep its
 * cache lines hot.
 */
static inline int process_huge_page(
        unsigned long addr_hint, unsigned int nr_pages,
        int (*process_subpage)(unsigned long addr, int idx, void *arg),
        void *arg)
{
        int i, n, base, l, ret;
        unsigned long addr = addr_hint &
                ~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);

        /* Process target subpage last to keep its cache lines hot */
        might_sleep();
        n = (addr_hint - addr) / PAGE_SIZE;
        if (2 * n <= nr_pages) {
                /* If target subpage in first half of huge page */
                base = 0;
                l = n;
                /* Process subpages at the end of huge page */
                for (i = nr_pages - 1; i >= 2 * n; i--) {
                        cond_resched();
                        ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
                        if (ret)
                                return ret;
                }
        } else {
                /* If target subpage in second half of huge page */
                base = nr_pages - 2 * (nr_pages - n);
                l = nr_pages - n;
                /* Process subpages at the beginning of huge page */
                for (i = 0; i < base; i++) {
                        cond_resched();
                        ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
                        if (ret)
                                return ret;
                }
        }
        /*
         * Process remaining subpages in left-right-left-right pattern
         * towards the target subpage
         */
        for (i = 0; i < l; i++) {
                int left_idx = base + i;
                int right_idx = base + 2 * l - 1 - i;

                cond_resched();
                ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
                if (ret)
                        return ret;
                cond_resched();
                ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
                if (ret)
                        return ret;
        }
        return 0;
}
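
/*
 * Example (standalone userspace demo): the visit order process_huge_page()
 * computes for an 8-subpage folio whose target subpage is index 5.  It
 * prints "0 1 2 7 3 6 4 5" -- the two leading subpages first, then
 * left/right pairs converging so the target is touched last, keeping its
 * cache lines hot for the access that triggered the operation.
 */
#include <stdio.h>

static void visit(int idx)
{
        printf("%d ", idx);
}

int main(void)
{
        int nr_pages = 8, n = 5;        /* n = index of the target subpage */
        int i, base, l;

        if (2 * n <= nr_pages) {
                base = 0;
                l = n;
                for (i = nr_pages - 1; i >= 2 * n; i--)
                        visit(i);       /* tail subpages, right to left */
        } else {
                base = nr_pages - 2 * (nr_pages - n);
                l = nr_pages - n;
                for (i = 0; i < base; i++)
                        visit(i);       /* leading subpages, left to right */
        }
        for (i = 0; i < l; i++) {
                visit(base + i);                /* left side */
                visit(base + 2 * l - 1 - i);    /* right side */
        }
        printf("\n");
        return 0;
}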

static void clear_gigantic_page(struct folio *folio, unsigned long addr,
                                unsigned int nr_pages)
{
        int i;

        might_sleep();
        for (i = 0; i < nr_pages; i++) {
                cond_resched();
                clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
        }
}

static int clear_subpage(unsigned long addr, int idx, void *arg)
{
        struct folio *folio = arg;

        clear_user_highpage(folio_page(folio, idx), addr);
        return 0;
}

/**
 * folio_zero_user - Zero a folio which will be mapped to userspace.
 * @folio: The folio to zero.
 * @addr_hint: The address that will be accessed, or the base address if
 *             uncertain.
 */
void folio_zero_user(struct folio *folio, unsigned long addr_hint)
{
        unsigned int nr_pages = folio_nr_pages(folio);

        if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
                clear_gigantic_page(folio, addr_hint, nr_pages);
        else
                process_huge_page(addr_hint, nr_pages, clear_subpage, folio);
}
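
/*
 * Example (sketch): callers pass the faulting address as @addr_hint so the
 * subpage the task is about to touch is zeroed last; the anonymous THP
 * fault path in mm/huge_memory.c does, in effect:
 *
 *      folio_zero_user(folio, vmf->address);
 */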

static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
                                   unsigned long addr,
                                   struct vm_area_struct *vma,
                                   unsigned int nr_pages)
{
        int i;
        struct page *dst_page;
        struct page *src_page;

        for (i = 0; i < nr_pages; i++) {
                dst_page = folio_page(dst, i);
                src_page = folio_page(src, i);

                cond_resched();
                if (copy_mc_user_highpage(dst_page, src_page,
                                          addr + i * PAGE_SIZE, vma))
                        return -EHWPOISON;
        }
        return 0;
}

struct copy_subpage_arg {
        struct folio *dst;
        struct folio *src;
        struct vm_area_struct *vma;
};

static int copy_subpage(unsigned long addr, int idx, void *arg)
{
        struct copy_subpage_arg *copy_arg = arg;
        struct page *dst = folio_page(copy_arg->dst, idx);
        struct page *src = folio_page(copy_arg->src, idx);

        if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma))
                return -EHWPOISON;
        return 0;
}

int copy_user_large_folio(struct folio *dst, struct folio *src,
                          unsigned long addr_hint, struct vm_area_struct *vma)
{
        unsigned int nr_pages = folio_nr_pages(dst);
        struct copy_subpage_arg arg = {
                .dst = dst,
                .src = src,
                .vma = vma,
        };

        if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
                return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);

        return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
}
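
/*
 * Example (sketch): a nonzero return means copy_mc_user_highpage() consumed
 * a hardware memory error (poison) in the source folio.  A fault-path
 * caller would typically give up and report the poison rather than retry,
 * along the lines of:
 *
 *      if (copy_user_large_folio(new_folio, old_folio, addr, vma))
 *              return VM_FAULT_HWPOISON_LARGE;
 */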

long copy_folio_from_user(struct folio *dst_folio,
                           const void __user *usr_src,
                           bool allow_pagefault)
{
        void *kaddr;
        unsigned long i, rc = 0;
        unsigned int nr_pages = folio_nr_pages(dst_folio);
        unsigned long ret_val = nr_pages * PAGE_SIZE;
        struct page *subpage;

        for (i = 0; i < nr_pages; i++) {
                subpage = folio_page(dst_folio, i);
                kaddr = kmap_local_page(subpage);
                if (!allow_pagefault)
                        pagefault_disable();
                rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
                if (!allow_pagefault)
                        pagefault_enable();
                kunmap_local(kaddr);

                ret_val -= (PAGE_SIZE - rc);
                if (rc)
                        break;

                flush_dcache_page(subpage);

                cond_resched();
        }
        return ret_val;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
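
/*
 * Example (sketch): like copy_from_user(), the return value is the number
 * of bytes left uncopied, 0 on success.  The userfaultfd UFFDIO_COPY path
 * for hugetlb first tries with page faults disabled while holding its
 * locks, then retries with faults allowed outside them, roughly:
 *
 *      ret = copy_folio_from_user(folio, usr_src, false);  <-- atomic try
 *      if (ret) {
 *              ...drop mmap_lock and hugetlb locks...
 *              ret = copy_folio_from_user(folio, usr_src, true);
 *      }
 */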

#if defined(CONFIG_SPLIT_PTE_PTLOCKS) && ALLOC_SPLIT_PTLOCKS

static struct kmem_cache *page_ptl_cachep;

void __init ptlock_cache_init(void)
{
        page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
                        SLAB_PANIC, NULL);
}

bool ptlock_alloc(struct ptdesc *ptdesc)
{
        spinlock_t *ptl;

        ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
        if (!ptl)
                return false;
        ptdesc->ptl = ptl;
        return true;
}

void ptlock_free(struct ptdesc *ptdesc)
{
        kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
}
#endif
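
/*
 * Example (sketch): with split PTE locks every page-table page carries its
 * own spinlock (allocated by the code above when spinlock_t is too large to
 * embed in struct ptdesc, e.g. with lockdep).  Walkers never touch these
 * directly; they go through the pte_offset_map_lock() helpers:
 */
static int touch_one_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
        spinlock_t *ptl;
        pte_t *pte;

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                return -EAGAIN;         /* the PTE table vanished under us */
        /* ... inspect or modify *pte while holding this table's lock ... */
        pte_unmap_unlock(pte, ptl);
        return 0;
}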

void vma_pgtable_walk_begin(struct vm_area_struct *vma)
{
        if (is_vm_hugetlb_page(vma))
                hugetlb_vma_lock_read(vma);
}

void vma_pgtable_walk_end(struct vm_area_struct *vma)
{
        if (is_vm_hugetlb_page(vma))
                hugetlb_vma_unlock_read(vma);
}
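
/*
 * Example (sketch): these hooks let a generic walker stay oblivious to
 * hugetlb's extra locking -- no-ops for ordinary VMAs, the shared VMA lock
 * in read mode for hugetlb so page tables cannot be unshared mid-walk.
 * folio_walk_start() in mm/pagewalk.c brackets its walk like:
 *
 *      vma_pgtable_walk_begin(vma);
 *      ...walk the vma's page tables...
 *      vma_pgtable_walk_end(vma);
 */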