
TOMOYO Linux Cross Reference
Linux/mm/memory.c


Diff markup

Differences between /mm/memory.c (linux-6.12-rc7, left column) and /mm/memory.c (linux-4.18.20, right column). Lines marked "<<" exist only in 6.12-rc7, lines marked ">>" exist only in 4.18.20, and "!!" marks lines that changed between the two; long lines are truncated to the column width of the viewer.


  1                                                << 
  2 // SPDX-License-Identifier: GPL-2.0-only       << 
  3 /*                                                  1 /*
  4  *  linux/mm/memory.c                               2  *  linux/mm/memory.c
  5  *                                                  3  *
  6  *  Copyright (C) 1991, 1992, 1993, 1994  Linu      4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  7  */                                                 5  */
  8                                                     6 
  9 /*                                                  7 /*
 10  * demand-loading started 01.12.91 - seems it       8  * demand-loading started 01.12.91 - seems it is high on the list of
 11  * things wanted, and it should be easy to imp      9  * things wanted, and it should be easy to implement. - Linus
 12  */                                                10  */
 13                                                    11 
 14 /*                                                 12 /*
 15  * Ok, demand-loading was easy, shared pages a     13  * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
 16  * pages started 02.12.91, seems to work. - Li     14  * pages started 02.12.91, seems to work. - Linus.
 17  *                                                 15  *
 18  * Tested sharing by executing about 30 /bin/s     16  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 19  * would have taken more than the 6M I have fr     17  * would have taken more than the 6M I have free, but it worked well as
 20  * far as I could see.                             18  * far as I could see.
 21  *                                                 19  *
 22  * Also corrected some "invalidate()"s - I was     20  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 23  */                                                21  */
 24                                                    22 
 25 /*                                                 23 /*
 26  * Real VM (paging to/from disk) started 18.12     24  * Real VM (paging to/from disk) started 18.12.91. Much more work and
 27  * thought has to go into this. Oh, well..         25  * thought has to go into this. Oh, well..
 28  * 19.12.91  -  works, somewhat. Sometimes I g     26  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 29  *              Found it. Everything seems to      27  *              Found it. Everything seems to work now.
 30  * 20.12.91  -  Ok, making the swap-device cha     28  * 20.12.91  -  Ok, making the swap-device changeable like the root.
 31  */                                                29  */
 32                                                    30 
 33 /*                                                 31 /*
 34  * 05.04.94  -  Multi-page memory management a     32  * 05.04.94  -  Multi-page memory management added for v1.1.
 35  *              Idea by Alex Bligh (alex@cconc     33  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
 36  *                                                 34  *
 37  * 16.07.99  -  Support of BIGMEM added by Ger     35  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 38  *              (Gerhard.Wichert@pdb.siemens.d     36  *              (Gerhard.Wichert@pdb.siemens.de)
 39  *                                                 37  *
 40  * Aug/Sep 2004 Changed to four level page tab     38  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 41  */                                                39  */
 42                                                    40 
 43 #include <linux/kernel_stat.h>                     41 #include <linux/kernel_stat.h>
 44 #include <linux/mm.h>                              42 #include <linux/mm.h>
 45 #include <linux/mm_inline.h>                   << 
 46 #include <linux/sched/mm.h>                        43 #include <linux/sched/mm.h>
 47 #include <linux/sched/coredump.h>                  44 #include <linux/sched/coredump.h>
 48 #include <linux/sched/numa_balancing.h>            45 #include <linux/sched/numa_balancing.h>
 49 #include <linux/sched/task.h>                      46 #include <linux/sched/task.h>
 50 #include <linux/hugetlb.h>                         47 #include <linux/hugetlb.h>
 51 #include <linux/mman.h>                            48 #include <linux/mman.h>
 52 #include <linux/swap.h>                            49 #include <linux/swap.h>
 53 #include <linux/highmem.h>                         50 #include <linux/highmem.h>
 54 #include <linux/pagemap.h>                         51 #include <linux/pagemap.h>
 55 #include <linux/memremap.h>                        52 #include <linux/memremap.h>
 56 #include <linux/kmsan.h>                       << 
 57 #include <linux/ksm.h>                             53 #include <linux/ksm.h>
 58 #include <linux/rmap.h>                            54 #include <linux/rmap.h>
 59 #include <linux/export.h>                          55 #include <linux/export.h>
 60 #include <linux/delayacct.h>                       56 #include <linux/delayacct.h>
 61 #include <linux/init.h>                            57 #include <linux/init.h>
 62 #include <linux/pfn_t.h>                           58 #include <linux/pfn_t.h>
 63 #include <linux/writeback.h>                       59 #include <linux/writeback.h>
 64 #include <linux/memcontrol.h>                      60 #include <linux/memcontrol.h>
 65 #include <linux/mmu_notifier.h>                    61 #include <linux/mmu_notifier.h>
 66 #include <linux/swapops.h>                         62 #include <linux/swapops.h>
 67 #include <linux/elf.h>                             63 #include <linux/elf.h>
 68 #include <linux/gfp.h>                             64 #include <linux/gfp.h>
 69 #include <linux/migrate.h>                         65 #include <linux/migrate.h>
 70 #include <linux/string.h>                          66 #include <linux/string.h>
 71 #include <linux/memory-tiers.h>                !!  67 #include <linux/dma-debug.h>
 72 #include <linux/debugfs.h>                         68 #include <linux/debugfs.h>
 73 #include <linux/userfaultfd_k.h>                   69 #include <linux/userfaultfd_k.h>
 74 #include <linux/dax.h>                             70 #include <linux/dax.h>
 75 #include <linux/oom.h>                             71 #include <linux/oom.h>
 76 #include <linux/numa.h>                        << 
 77 #include <linux/perf_event.h>                  << 
 78 #include <linux/ptrace.h>                      << 
 79 #include <linux/vmalloc.h>                     << 
 80 #include <linux/sched/sysctl.h>                << 
 81                                                << 
 82 #include <trace/events/kmem.h>                 << 
 83                                                    72 
 84 #include <asm/io.h>                                73 #include <asm/io.h>
 85 #include <asm/mmu_context.h>                       74 #include <asm/mmu_context.h>
 86 #include <asm/pgalloc.h>                           75 #include <asm/pgalloc.h>
 87 #include <linux/uaccess.h>                         76 #include <linux/uaccess.h>
 88 #include <asm/tlb.h>                               77 #include <asm/tlb.h>
 89 #include <asm/tlbflush.h>                          78 #include <asm/tlbflush.h>
                                                   >>  79 #include <asm/pgtable.h>
 90                                                    80 
 91 #include "pgalloc-track.h"                     << 
 92 #include "internal.h"                              81 #include "internal.h"
 93 #include "swap.h"                              << 
 94                                                    82 
 95 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) &&      83 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
 96 #warning Unfortunate NUMA and NUMA Balancing c     84 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
 97 #endif                                             85 #endif
 98                                                    86 
 99 #ifndef CONFIG_NUMA                            !!  87 #ifndef CONFIG_NEED_MULTIPLE_NODES
                                                   >>  88 /* use the per-pgdat data instead for discontigmem - mbligh */
100 unsigned long max_mapnr;                           89 unsigned long max_mapnr;
101 EXPORT_SYMBOL(max_mapnr);                          90 EXPORT_SYMBOL(max_mapnr);
102                                                    91 
103 struct page *mem_map;                              92 struct page *mem_map;
104 EXPORT_SYMBOL(mem_map);                            93 EXPORT_SYMBOL(mem_map);
105 #endif                                             94 #endif
106                                                    95 
107 static vm_fault_t do_fault(struct vm_fault *vm << 
108 static vm_fault_t do_anonymous_page(struct vm_ << 
109 static bool vmf_pte_changed(struct vm_fault *v << 
110                                                << 
111 /*                                             << 
112  * Return true if the original pte was a uffd- << 
113  * wr-protected).                              << 
114  */                                            << 
115 static __always_inline bool vmf_orig_pte_uffd_ << 
116 {                                              << 
117         if (!userfaultfd_wp(vmf->vma))         << 
118                 return false;                  << 
119         if (!(vmf->flags & FAULT_FLAG_ORIG_PTE << 
120                 return false;                  << 
121                                                << 
122         return pte_marker_uffd_wp(vmf->orig_pt << 
123 }                                              << 
124                                                << 
125 /*                                                 96 /*
126  * A number of key systems in x86 including io     97  * A number of key systems in x86 including ioremap() rely on the assumption
127  * that high_memory defines the upper bound on     98  * that high_memory defines the upper bound on direct map memory, then end
128  * of ZONE_NORMAL.                             !!  99  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
                                                   >> 100  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
                                                   >> 101  * and ZONE_HIGHMEM.
129  */                                               102  */
130 void *high_memory;                                103 void *high_memory;
131 EXPORT_SYMBOL(high_memory);                       104 EXPORT_SYMBOL(high_memory);
132                                                   105 
133 /*                                                106 /*
134  * Randomize the address space (stacks, mmaps,    107  * Randomize the address space (stacks, mmaps, brk, etc.).
135  *                                                108  *
136  * ( When CONFIG_COMPAT_BRK=y we exclude brk f    109  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
137  *   as ancient (libc5 based) binaries can seg    110  *   as ancient (libc5 based) binaries can segfault. )
138  */                                               111  */
139 int randomize_va_space __read_mostly =            112 int randomize_va_space __read_mostly =
140 #ifdef CONFIG_COMPAT_BRK                          113 #ifdef CONFIG_COMPAT_BRK
141                                         1;        114                                         1;
142 #else                                             115 #else
143                                         2;        116                                         2;
144 #endif                                            117 #endif
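
The default above is 1 (keep brk unrandomized) when CONFIG_COMPAT_BRK is set and 2 (full randomization) otherwise, and the "norandmaps" handler just below forces 0. As a stand-alone illustration, not part of memory.c, a user-space program can read the effective setting from /proc/sys/kernel/randomize_va_space; the interpretation strings simply restate those three values:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/randomize_va_space", "r");
	int val;

	if (!f || fscanf(f, "%d", &val) != 1) {
		perror("randomize_va_space");
		return 1;
	}
	fclose(f);

	switch (val) {
	case 0:
		puts("0: randomization disabled (e.g. booted with norandmaps)");
		break;
	case 1:
		puts("1: stacks/mmaps randomized, brk left alone (the CONFIG_COMPAT_BRK default)");
		break;
	case 2:
		puts("2: full randomization, including brk");
		break;
	default:
		printf("unexpected value %d\n", val);
	}
	return 0;
}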
145                                                   118 
146 #ifndef arch_wants_old_prefaulted_pte          << 
147 static inline bool arch_wants_old_prefaulted_p << 
148 {                                              << 
149         /*                                     << 
150          * Transitioning a PTE from 'old' to ' << 
151          * some architectures, even if it's pe << 
152          * default, "false" means prefaulted e << 
153          */                                    << 
154         return false;                          << 
155 }                                              << 
156 #endif                                         << 
157                                                << 
158 static int __init disable_randmaps(char *s)       119 static int __init disable_randmaps(char *s)
159 {                                                 120 {
160         randomize_va_space = 0;                   121         randomize_va_space = 0;
161         return 1;                                 122         return 1;
162 }                                                 123 }
163 __setup("norandmaps", disable_randmaps);          124 __setup("norandmaps", disable_randmaps);
164                                                   125 
165 unsigned long zero_pfn __read_mostly;             126 unsigned long zero_pfn __read_mostly;
166 EXPORT_SYMBOL(zero_pfn);                          127 EXPORT_SYMBOL(zero_pfn);
167                                                   128 
168 unsigned long highest_memmap_pfn __read_mostly    129 unsigned long highest_memmap_pfn __read_mostly;
169                                                   130 
170 /*                                                131 /*
171  * CONFIG_MMU architectures set up ZERO_PAGE i    132  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
172  */                                               133  */
173 static int __init init_zero_pfn(void)             134 static int __init init_zero_pfn(void)
174 {                                                 135 {
175         zero_pfn = page_to_pfn(ZERO_PAGE(0));     136         zero_pfn = page_to_pfn(ZERO_PAGE(0));
176         return 0;                                 137         return 0;
177 }                                                 138 }
178 early_initcall(init_zero_pfn);                 !! 139 core_initcall(init_zero_pfn);
                                                   >> 140 
                                                   >> 141 
                                                   >> 142 #if defined(SPLIT_RSS_COUNTING)
                                                   >> 143 
                                                   >> 144 void sync_mm_rss(struct mm_struct *mm)
                                                   >> 145 {
                                                   >> 146         int i;
                                                   >> 147 
                                                   >> 148         for (i = 0; i < NR_MM_COUNTERS; i++) {
                                                   >> 149                 if (current->rss_stat.count[i]) {
                                                   >> 150                         add_mm_counter(mm, i, current->rss_stat.count[i]);
                                                   >> 151                         current->rss_stat.count[i] = 0;
                                                   >> 152                 }
                                                   >> 153         }
                                                   >> 154         current->rss_stat.events = 0;
                                                   >> 155 }
                                                   >> 156 
                                                   >> 157 static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
                                                   >> 158 {
                                                   >> 159         struct task_struct *task = current;
                                                   >> 160 
                                                   >> 161         if (likely(task->mm == mm))
                                                   >> 162                 task->rss_stat.count[member] += val;
                                                   >> 163         else
                                                   >> 164                 add_mm_counter(mm, member, val);
                                                   >> 165 }
                                                   >> 166 #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
                                                   >> 167 #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
                                                   >> 168 
                                                   >> 169 /* sync counter once per 64 page faults */
                                                   >> 170 #define TASK_RSS_EVENTS_THRESH  (64)
                                                   >> 171 static void check_sync_rss_stat(struct task_struct *task)
                                                   >> 172 {
                                                   >> 173         if (unlikely(task != current))
                                                   >> 174                 return;
                                                   >> 175         if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
                                                   >> 176                 sync_mm_rss(task->mm);
                                                   >> 177 }
                                                   >> 178 #else /* SPLIT_RSS_COUNTING */
                                                   >> 179 
                                                   >> 180 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
                                                   >> 181 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
                                                   >> 182 
                                                   >> 183 static void check_sync_rss_stat(struct task_struct *task)
                                                   >> 184 {
                                                   >> 185 }
                                                   >> 186 
                                                   >> 187 #endif /* SPLIT_RSS_COUNTING */
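
The SPLIT_RSS_COUNTING block on the 4.18 side caches RSS deltas in the faulting task (add_mm_counter_fast) and folds them into the mm_struct only once every TASK_RSS_EVENTS_THRESH events (sync_mm_rss / check_sync_rss_stat); the 6.12 side no longer carries this code in memory.c. A minimal stand-alone sketch of that batching idea, with invented names and a plain atomic standing in for the mm-wide counters, is:

#include <stdatomic.h>
#include <stdio.h>

#define EVENTS_THRESH 64                  /* stands in for TASK_RSS_EVENTS_THRESH */

static _Atomic long shared_rss;           /* plays the role of the mm-wide counter */
static _Thread_local long local_rss;      /* plays the role of current->rss_stat.count[] */
static _Thread_local int rss_events;

static void sync_rss(void)                /* like sync_mm_rss() */
{
	if (local_rss) {
		atomic_fetch_add(&shared_rss, local_rss);
		local_rss = 0;
	}
	rss_events = 0;
}

static void add_rss_fast(long val)        /* like add_mm_counter_fast() */
{
	local_rss += val;
	if (++rss_events > EVENTS_THRESH) /* flush periodically, like check_sync_rss_stat() */
		sync_rss();
}

int main(void)
{
	for (int i = 0; i < 1000; i++)
		add_rss_fast(1);
	sync_rss();                       /* final fold, as done when the task exits */
	printf("shared_rss = %ld\n", atomic_load(&shared_rss));
	return 0;
}

The threshold trades momentary accuracy of the shared counter for fewer updates to a contended cache line, which is the trade-off the "sync counter once per 64 page faults" comment above refers to.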
                                                   >> 188 
                                                   >> 189 #ifdef HAVE_GENERIC_MMU_GATHER
                                                   >> 190 
                                                   >> 191 static bool tlb_next_batch(struct mmu_gather *tlb)
                                                   >> 192 {
                                                   >> 193         struct mmu_gather_batch *batch;
                                                   >> 194 
                                                   >> 195         batch = tlb->active;
                                                   >> 196         if (batch->next) {
                                                   >> 197                 tlb->active = batch->next;
                                                   >> 198                 return true;
                                                   >> 199         }
                                                   >> 200 
                                                   >> 201         if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
                                                   >> 202                 return false;
                                                   >> 203 
                                                   >> 204         batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
                                                   >> 205         if (!batch)
                                                   >> 206                 return false;
                                                   >> 207 
                                                   >> 208         tlb->batch_count++;
                                                   >> 209         batch->next = NULL;
                                                   >> 210         batch->nr   = 0;
                                                   >> 211         batch->max  = MAX_GATHER_BATCH;
                                                   >> 212 
                                                   >> 213         tlb->active->next = batch;
                                                   >> 214         tlb->active = batch;
                                                   >> 215 
                                                   >> 216         return true;
                                                   >> 217 }
                                                   >> 218 
                                                   >> 219 void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                                                   >> 220                                 unsigned long start, unsigned long end)
                                                   >> 221 {
                                                   >> 222         tlb->mm = mm;
                                                   >> 223 
                                                   >> 224         /* Is it from 0 to ~0? */
                                                   >> 225         tlb->fullmm     = !(start | (end+1));
                                                   >> 226         tlb->need_flush_all = 0;
                                                   >> 227         tlb->local.next = NULL;
                                                   >> 228         tlb->local.nr   = 0;
                                                   >> 229         tlb->local.max  = ARRAY_SIZE(tlb->__pages);
                                                   >> 230         tlb->active     = &tlb->local;
                                                   >> 231         tlb->batch_count = 0;
                                                   >> 232 
                                                   >> 233 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
                                                   >> 234         tlb->batch = NULL;
                                                   >> 235 #endif
                                                   >> 236         tlb->page_size = 0;
                                                   >> 237 
                                                   >> 238         __tlb_reset_range(tlb);
                                                   >> 239 }
                                                   >> 240 
                                                   >> 241 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
                                                   >> 242 {
                                                   >> 243         if (!tlb->end)
                                                   >> 244                 return;
179                                                   245 
180 void mm_trace_rss_stat(struct mm_struct *mm, i !! 246         tlb_flush(tlb);
                                                   >> 247         mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
                                                   >> 248         __tlb_reset_range(tlb);
                                                   >> 249 }
                                                   >> 250 
                                                   >> 251 static void tlb_flush_mmu_free(struct mmu_gather *tlb)
                                                   >> 252 {
                                                   >> 253         struct mmu_gather_batch *batch;
                                                   >> 254 
                                                   >> 255 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
                                                   >> 256         tlb_table_flush(tlb);
                                                   >> 257 #endif
                                                   >> 258         for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
                                                   >> 259                 free_pages_and_swap_cache(batch->pages, batch->nr);
                                                   >> 260                 batch->nr = 0;
                                                   >> 261         }
                                                   >> 262         tlb->active = &tlb->local;
                                                   >> 263 }
                                                   >> 264 
                                                   >> 265 void tlb_flush_mmu(struct mmu_gather *tlb)
                                                   >> 266 {
                                                   >> 267         tlb_flush_mmu_tlbonly(tlb);
                                                   >> 268         tlb_flush_mmu_free(tlb);
                                                   >> 269 }
                                                   >> 270 
                                                   >> 271 /* tlb_finish_mmu
                                                   >> 272  *      Called at the end of the shootdown operation to free up any resources
                                                   >> 273  *      that were required.
                                                   >> 274  */
                                                   >> 275 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
                                                   >> 276                 unsigned long start, unsigned long end, bool force)
                                                   >> 277 {
                                                   >> 278         struct mmu_gather_batch *batch, *next;
                                                   >> 279 
                                                   >> 280         if (force)
                                                   >> 281                 __tlb_adjust_range(tlb, start, end - start);
                                                   >> 282 
                                                   >> 283         tlb_flush_mmu(tlb);
                                                   >> 284 
                                                   >> 285         /* keep the page table cache within bounds */
                                                   >> 286         check_pgt_cache();
                                                   >> 287 
                                                   >> 288         for (batch = tlb->local.next; batch; batch = next) {
                                                   >> 289                 next = batch->next;
                                                   >> 290                 free_pages((unsigned long)batch, 0);
                                                   >> 291         }
                                                   >> 292         tlb->local.next = NULL;
                                                   >> 293 }
                                                   >> 294 
                                                   >> 295 /* __tlb_remove_page
                                                   >> 296  *      Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
                                                   >> 297  *      handling the additional races in SMP caused by other CPUs caching valid
                                                   >> 298  *      mappings in their TLBs. Returns the number of free page slots left.
                                                   >> 299  *      When out of page slots we must call tlb_flush_mmu().
                                                   >> 300  *returns true if the caller should flush.
                                                   >> 301  */
                                                   >> 302 bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
181 {                                                 303 {
182         trace_rss_stat(mm, member);            !! 304         struct mmu_gather_batch *batch;
                                                   >> 305 
                                                   >> 306         VM_BUG_ON(!tlb->end);
                                                   >> 307         VM_WARN_ON(tlb->page_size != page_size);
                                                   >> 308 
                                                   >> 309         batch = tlb->active;
                                                   >> 310         /*
                                                   >> 311          * Add the page and check if we are full. If so
                                                   >> 312          * force a flush.
                                                   >> 313          */
                                                   >> 314         batch->pages[batch->nr++] = page;
                                                   >> 315         if (batch->nr == batch->max) {
                                                   >> 316                 if (!tlb_next_batch(tlb))
                                                   >> 317                         return true;
                                                   >> 318                 batch = tlb->active;
                                                   >> 319         }
                                                   >> 320         VM_BUG_ON_PAGE(batch->nr > batch->max, page);
                                                   >> 321 
                                                   >> 322         return false;
                                                   >> 323 }
                                                   >> 324 
                                                   >> 325 #endif /* HAVE_GENERIC_MMU_GATHER */
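
The generic mmu_gather code on the 4.18 side (tlb_next_batch, __tlb_remove_page_size and friends) accumulates page pointers into a chain of fixed-size batches and only asks the caller to flush when the chain cannot grow any further; in 6.12 this machinery lives in mm/mmu_gather.c rather than memory.c. A rough user-space analogue of the pattern, with all names invented, might look like:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH_MAX   8      /* stands in for MAX_GATHER_BATCH */
#define BATCH_LIMIT 4      /* stands in for MAX_GATHER_BATCH_COUNT */

struct batch {
	struct batch *next;
	int nr;
	void *items[BATCH_MAX];
};

struct gather {
	struct batch local;       /* embedded first batch, like tlb->local */
	struct batch *active;
	int batch_count;
};

static void gather_init(struct gather *g)
{
	g->local.next = NULL;
	g->local.nr = 0;
	g->active = &g->local;
	g->batch_count = 0;
}

static bool next_batch(struct gather *g)
{
	struct batch *b = g->active;

	if (b->next) {                      /* reuse an already-chained batch */
		g->active = b->next;
		return true;
	}
	if (g->batch_count == BATCH_LIMIT)  /* refuse to grow without bound */
		return false;
	b = calloc(1, sizeof(*b));
	if (!b)
		return false;
	g->batch_count++;
	g->active->next = b;
	g->active = b;
	return true;
}

/* Returns true when the caller must flush, like __tlb_remove_page_size(). */
static bool gather_add(struct gather *g, void *item)
{
	struct batch *b = g->active;

	b->items[b->nr++] = item;
	if (b->nr == BATCH_MAX && !next_batch(g))
		return true;
	return false;
}

static void gather_flush(struct gather *g)
{
	for (struct batch *b = &g->local; b; b = b->next) {
		printf("flushing %d items\n", b->nr);  /* the kernel frees pages here */
		b->nr = 0;
	}
	g->active = &g->local;
}

int main(void)
{
	struct gather g;
	int dummy[100];

	gather_init(&g);
	for (int i = 0; i < 100; i++)
		if (gather_add(&g, &dummy[i]))
			gather_flush(&g);
	gather_flush(&g);

	/* drop the chained batches, like arch_tlb_finish_mmu() */
	for (struct batch *b = g.local.next, *next; b; b = next) {
		next = b->next;
		free(b);
	}
	return 0;
}

The gather_init()/gather_flush()/teardown sequence in main() mirrors the tlb_gather_mmu() ... tlb_finish_mmu() pairing documented further down in the 4.18 column.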
                                                   >> 326 
                                                   >> 327 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
                                                   >> 328 
                                                   >> 329 /*
                                                   >> 330  * See the comment near struct mmu_table_batch.
                                                   >> 331  */
                                                   >> 332 
                                                   >> 333 /*
                                                   >> 334  * If we want tlb_remove_table() to imply TLB invalidates.
                                                   >> 335  */
                                                   >> 336 static inline void tlb_table_invalidate(struct mmu_gather *tlb)
                                                   >> 337 {
                                                   >> 338 #ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
                                                   >> 339         /*
                                                   >> 340          * Invalidate page-table caches used by hardware walkers. Then we still
                                                   >> 341          * need to RCU-sched wait while freeing the pages because software
                                                   >> 342          * walkers can still be in-flight.
                                                   >> 343          */
                                                   >> 344         tlb_flush_mmu_tlbonly(tlb);
                                                   >> 345 #endif
                                                   >> 346 }
                                                   >> 347 
                                                   >> 348 static void tlb_remove_table_smp_sync(void *arg)
                                                   >> 349 {
                                                   >> 350         /* Simply deliver the interrupt */
                                                   >> 351 }
                                                   >> 352 
                                                   >> 353 static void tlb_remove_table_one(void *table)
                                                   >> 354 {
                                                   >> 355         /*
                                                   >> 356          * This isn't an RCU grace period and hence the page-tables cannot be
                                                   >> 357          * assumed to be actually RCU-freed.
                                                   >> 358          *
                                                   >> 359          * It is however sufficient for software page-table walkers that rely on
                                                   >> 360          * IRQ disabling. See the comment near struct mmu_table_batch.
                                                   >> 361          */
                                                   >> 362         smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
                                                   >> 363         __tlb_remove_table(table);
                                                   >> 364 }
                                                   >> 365 
                                                   >> 366 static void tlb_remove_table_rcu(struct rcu_head *head)
                                                   >> 367 {
                                                   >> 368         struct mmu_table_batch *batch;
                                                   >> 369         int i;
                                                   >> 370 
                                                   >> 371         batch = container_of(head, struct mmu_table_batch, rcu);
                                                   >> 372 
                                                   >> 373         for (i = 0; i < batch->nr; i++)
                                                   >> 374                 __tlb_remove_table(batch->tables[i]);
                                                   >> 375 
                                                   >> 376         free_page((unsigned long)batch);
                                                   >> 377 }
                                                   >> 378 
                                                   >> 379 void tlb_table_flush(struct mmu_gather *tlb)
                                                   >> 380 {
                                                   >> 381         struct mmu_table_batch **batch = &tlb->batch;
                                                   >> 382 
                                                   >> 383         if (*batch) {
                                                   >> 384                 tlb_table_invalidate(tlb);
                                                   >> 385                 call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
                                                   >> 386                 *batch = NULL;
                                                   >> 387         }
                                                   >> 388 }
                                                   >> 389 
                                                   >> 390 void tlb_remove_table(struct mmu_gather *tlb, void *table)
                                                   >> 391 {
                                                   >> 392         struct mmu_table_batch **batch = &tlb->batch;
                                                   >> 393 
                                                   >> 394         if (*batch == NULL) {
                                                   >> 395                 *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                                                   >> 396                 if (*batch == NULL) {
                                                   >> 397                         tlb_table_invalidate(tlb);
                                                   >> 398                         tlb_remove_table_one(table);
                                                   >> 399                         return;
                                                   >> 400                 }
                                                   >> 401                 (*batch)->nr = 0;
                                                   >> 402         }
                                                   >> 403 
                                                   >> 404         (*batch)->tables[(*batch)->nr++] = table;
                                                   >> 405         if ((*batch)->nr == MAX_TABLE_BATCH)
                                                   >> 406                 tlb_table_flush(tlb);
                                                   >> 407 }
                                                   >> 408 
                                                   >> 409 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
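
tlb_remove_table() above shows a recurring kernel pattern: queue page-table pages into a batch that will be freed only after a grace period (call_rcu_sched), and if the GFP_NOWAIT batch allocation fails, synchronize with concurrent walkers right away and free that single table synchronously. The sketch below reproduces only the shape of that logic in plain C; the names are invented, and the "deferred" free is a direct call because real RCU is not available here:

#include <stdio.h>
#include <stdlib.h>

#define TABLE_BATCH_MAX 8    /* stands in for MAX_TABLE_BATCH */

struct table_batch {
	int nr;
	void *tables[TABLE_BATCH_MAX];
};

static struct table_batch *batch;    /* stands in for tlb->batch */

static void free_table(void *table)  /* stands in for __tlb_remove_table() */
{
	printf("freeing table %p\n", table);
	free(table);
}

/* Stand-in for call_rcu_sched(): here the batch is simply freed at once. */
static void flush_table_batch(void)
{
	for (int i = 0; i < batch->nr; i++)
		free_table(batch->tables[i]);
	free(batch);
	batch = NULL;
}

static void remove_table(void *table)
{
	if (!batch) {
		batch = calloc(1, sizeof(*batch));
		if (!batch) {
			/* kernel: IPI every CPU, then free this one table now */
			free_table(table);
			return;
		}
	}
	batch->tables[batch->nr++] = table;
	if (batch->nr == TABLE_BATCH_MAX)
		flush_table_batch();
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		remove_table(malloc(64));
	if (batch)                 /* final flush, as tlb_table_flush() does */
		flush_table_batch();
	return 0;
}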
                                                   >> 410 
                                                   >> 411 /**
                                                   >> 412  * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
                                                   >> 413  * @tlb: the mmu_gather structure to initialize
                                                   >> 414  * @mm: the mm_struct of the target address space
                                                   >> 415  * @start: start of the region that will be removed from the page-table
                                                   >> 416  * @end: end of the region that will be removed from the page-table
                                                   >> 417  *
                                                   >> 418  * Called to initialize an (on-stack) mmu_gather structure for page-table
                                                   >> 419  * tear-down from @mm. The @start and @end are set to 0 and -1
                                                   >> 420  * respectively when @mm is without users and we're going to destroy
                                                   >> 421  * the full address space (exit/execve).
                                                   >> 422  */
                                                   >> 423 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                                                   >> 424                         unsigned long start, unsigned long end)
                                                   >> 425 {
                                                   >> 426         arch_tlb_gather_mmu(tlb, mm, start, end);
                                                   >> 427         inc_tlb_flush_pending(tlb->mm);
                                                   >> 428 }
                                                   >> 429 
                                                   >> 430 void tlb_finish_mmu(struct mmu_gather *tlb,
                                                   >> 431                 unsigned long start, unsigned long end)
                                                   >> 432 {
                                                   >> 433         /*
                                                   >> 434          * If there are parallel threads are doing PTE changes on same range
                                                   >> 435          * under non-exclusive lock(e.g., mmap_sem read-side) but defer TLB
                                                   >> 436          * flush by batching, a thread has stable TLB entry can fail to flush
                                                   >> 437          * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
                                                   >> 438          * forcefully if we detect parallel PTE batching threads.
                                                   >> 439          */
                                                   >> 440         bool force = mm_tlb_flush_nested(tlb->mm);
                                                   >> 441 
                                                   >> 442         arch_tlb_finish_mmu(tlb, start, end, force);
                                                   >> 443         dec_tlb_flush_pending(tlb->mm);
183 }                                                 444 }
184                                                   445 
185 /*                                                446 /*
186  * Note: this doesn't free the actual pages th    447  * Note: this doesn't free the actual pages themselves. That
187  * has been handled earlier when unmapping all    448  * has been handled earlier when unmapping all the memory regions.
188  */                                               449  */
189 static void free_pte_range(struct mmu_gather *    450 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
190                            unsigned long addr)    451                            unsigned long addr)
191 {                                                 452 {
192         pgtable_t token = pmd_pgtable(*pmd);      453         pgtable_t token = pmd_pgtable(*pmd);
193         pmd_clear(pmd);                           454         pmd_clear(pmd);
194         pte_free_tlb(tlb, token, addr);           455         pte_free_tlb(tlb, token, addr);
195         mm_dec_nr_ptes(tlb->mm);                  456         mm_dec_nr_ptes(tlb->mm);
196 }                                                 457 }
197                                                   458 
198 static inline void free_pmd_range(struct mmu_g    459 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
199                                 unsigned long     460                                 unsigned long addr, unsigned long end,
200                                 unsigned long     461                                 unsigned long floor, unsigned long ceiling)
201 {                                                 462 {
202         pmd_t *pmd;                               463         pmd_t *pmd;
203         unsigned long next;                       464         unsigned long next;
204         unsigned long start;                      465         unsigned long start;
205                                                   466 
206         start = addr;                             467         start = addr;
207         pmd = pmd_offset(pud, addr);              468         pmd = pmd_offset(pud, addr);
208         do {                                      469         do {
209                 next = pmd_addr_end(addr, end)    470                 next = pmd_addr_end(addr, end);
210                 if (pmd_none_or_clear_bad(pmd)    471                 if (pmd_none_or_clear_bad(pmd))
211                         continue;                 472                         continue;
212                 free_pte_range(tlb, pmd, addr)    473                 free_pte_range(tlb, pmd, addr);
213         } while (pmd++, addr = next, addr != e    474         } while (pmd++, addr = next, addr != end);
214                                                   475 
215         start &= PUD_MASK;                        476         start &= PUD_MASK;
216         if (start < floor)                        477         if (start < floor)
217                 return;                           478                 return;
218         if (ceiling) {                            479         if (ceiling) {
219                 ceiling &= PUD_MASK;              480                 ceiling &= PUD_MASK;
220                 if (!ceiling)                     481                 if (!ceiling)
221                         return;                   482                         return;
222         }                                         483         }
223         if (end - 1 > ceiling - 1)                484         if (end - 1 > ceiling - 1)
224                 return;                           485                 return;
225                                                   486 
226         pmd = pmd_offset(pud, start);             487         pmd = pmd_offset(pud, start);
227         pud_clear(pud);                           488         pud_clear(pud);
228         pmd_free_tlb(tlb, pmd, start);            489         pmd_free_tlb(tlb, pmd, start);
229         mm_dec_nr_pmds(tlb->mm);                  490         mm_dec_nr_pmds(tlb->mm);
230 }                                                 491 }
231                                                   492 
232 static inline void free_pud_range(struct mmu_g    493 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
233                                 unsigned long     494                                 unsigned long addr, unsigned long end,
234                                 unsigned long     495                                 unsigned long floor, unsigned long ceiling)
235 {                                                 496 {
236         pud_t *pud;                               497         pud_t *pud;
237         unsigned long next;                       498         unsigned long next;
238         unsigned long start;                      499         unsigned long start;
239                                                   500 
240         start = addr;                             501         start = addr;
241         pud = pud_offset(p4d, addr);              502         pud = pud_offset(p4d, addr);
242         do {                                      503         do {
243                 next = pud_addr_end(addr, end)    504                 next = pud_addr_end(addr, end);
244                 if (pud_none_or_clear_bad(pud)    505                 if (pud_none_or_clear_bad(pud))
245                         continue;                 506                         continue;
246                 free_pmd_range(tlb, pud, addr,    507                 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
247         } while (pud++, addr = next, addr != e    508         } while (pud++, addr = next, addr != end);
248                                                   509 
249         start &= P4D_MASK;                        510         start &= P4D_MASK;
250         if (start < floor)                        511         if (start < floor)
251                 return;                           512                 return;
252         if (ceiling) {                            513         if (ceiling) {
253                 ceiling &= P4D_MASK;              514                 ceiling &= P4D_MASK;
254                 if (!ceiling)                     515                 if (!ceiling)
255                         return;                   516                         return;
256         }                                         517         }
257         if (end - 1 > ceiling - 1)                518         if (end - 1 > ceiling - 1)
258                 return;                           519                 return;
259                                                   520 
260         pud = pud_offset(p4d, start);             521         pud = pud_offset(p4d, start);
261         p4d_clear(p4d);                           522         p4d_clear(p4d);
262         pud_free_tlb(tlb, pud, start);            523         pud_free_tlb(tlb, pud, start);
263         mm_dec_nr_puds(tlb->mm);                  524         mm_dec_nr_puds(tlb->mm);
264 }                                                 525 }
265                                                   526 
266 static inline void free_p4d_range(struct mmu_g    527 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
267                                 unsigned long     528                                 unsigned long addr, unsigned long end,
268                                 unsigned long     529                                 unsigned long floor, unsigned long ceiling)
269 {                                                 530 {
270         p4d_t *p4d;                               531         p4d_t *p4d;
271         unsigned long next;                       532         unsigned long next;
272         unsigned long start;                      533         unsigned long start;
273                                                   534 
274         start = addr;                             535         start = addr;
275         p4d = p4d_offset(pgd, addr);              536         p4d = p4d_offset(pgd, addr);
276         do {                                      537         do {
277                 next = p4d_addr_end(addr, end)    538                 next = p4d_addr_end(addr, end);
278                 if (p4d_none_or_clear_bad(p4d)    539                 if (p4d_none_or_clear_bad(p4d))
279                         continue;                 540                         continue;
280                 free_pud_range(tlb, p4d, addr,    541                 free_pud_range(tlb, p4d, addr, next, floor, ceiling);
281         } while (p4d++, addr = next, addr != e    542         } while (p4d++, addr = next, addr != end);
282                                                   543 
283         start &= PGDIR_MASK;                      544         start &= PGDIR_MASK;
284         if (start < floor)                        545         if (start < floor)
285                 return;                           546                 return;
286         if (ceiling) {                            547         if (ceiling) {
287                 ceiling &= PGDIR_MASK;            548                 ceiling &= PGDIR_MASK;
288                 if (!ceiling)                     549                 if (!ceiling)
289                         return;                   550                         return;
290         }                                         551         }
291         if (end - 1 > ceiling - 1)                552         if (end - 1 > ceiling - 1)
292                 return;                           553                 return;
293                                                   554 
294         p4d = p4d_offset(pgd, start);             555         p4d = p4d_offset(pgd, start);
295         pgd_clear(pgd);                           556         pgd_clear(pgd);
296         p4d_free_tlb(tlb, p4d, start);            557         p4d_free_tlb(tlb, p4d, start);
297 }                                                 558 }
298                                                   559 
299 /*                                                560 /*
300  * This function frees user-level page tables     561  * This function frees user-level page tables of a process.
301  */                                               562  */
302 void free_pgd_range(struct mmu_gather *tlb,       563 void free_pgd_range(struct mmu_gather *tlb,
303                         unsigned long addr, un    564                         unsigned long addr, unsigned long end,
304                         unsigned long floor, u    565                         unsigned long floor, unsigned long ceiling)
305 {                                                 566 {
306         pgd_t *pgd;                               567         pgd_t *pgd;
307         unsigned long next;                       568         unsigned long next;
308                                                   569 
309         /*                                        570         /*
310          * The next few lines have given us lo    571          * The next few lines have given us lots of grief...
311          *                                        572          *
312          * Why are we testing PMD* at this top    573          * Why are we testing PMD* at this top level?  Because often
313          * there will be no work to do at all,    574          * there will be no work to do at all, and we'd prefer not to
314          * go all the way down to the bottom j    575          * go all the way down to the bottom just to discover that.
315          *                                        576          *
316          * Why all these "- 1"s?  Because 0 re    577          * Why all these "- 1"s?  Because 0 represents both the bottom
317          * of the address space and the top of    578          * of the address space and the top of it (using -1 for the
318          * top wouldn't help much: the masks w    579          * top wouldn't help much: the masks would do the wrong thing).
319          * The rule is that addr 0 and floor 0    580          * The rule is that addr 0 and floor 0 refer to the bottom of
320          * the address space, but end 0 and ce    581          * the address space, but end 0 and ceiling 0 refer to the top
321          * Comparisons need to use "end - 1" a    582          * Comparisons need to use "end - 1" and "ceiling - 1" (though
322          * that end 0 case should be mythical)    583          * that end 0 case should be mythical).
323          *                                        584          *
324          * Wherever addr is brought up or ceil    585          * Wherever addr is brought up or ceiling brought down, we must
325          * be careful to reject "the opposite     586          * be careful to reject "the opposite 0" before it confuses the
326          * subsequent tests.  But what about w    587          * subsequent tests.  But what about where end is brought down
327          * by PMD_SIZE below? no, end can't go    588          * by PMD_SIZE below? no, end can't go down to 0 there.
328          *                                        589          *
329          * Whereas we round start (addr) and c    590          * Whereas we round start (addr) and ceiling down, by different
330          * masks at different levels, in order    591          * masks at different levels, in order to test whether a table
331          * now has no other vmas using it, so     592          * now has no other vmas using it, so can be freed, we don't
332          * bother to round floor or end up - t    593          * bother to round floor or end up - the tests don't need that.
333          */                                       594          */
334                                                   595 
335         addr &= PMD_MASK;                         596         addr &= PMD_MASK;
336         if (addr < floor) {                       597         if (addr < floor) {
337                 addr += PMD_SIZE;                 598                 addr += PMD_SIZE;
338                 if (!addr)                        599                 if (!addr)
339                         return;                   600                         return;
340         }                                         601         }
341         if (ceiling) {                            602         if (ceiling) {
342                 ceiling &= PMD_MASK;              603                 ceiling &= PMD_MASK;
343                 if (!ceiling)                     604                 if (!ceiling)
344                         return;                   605                         return;
345         }                                         606         }
346         if (end - 1 > ceiling - 1)                607         if (end - 1 > ceiling - 1)
347                 end -= PMD_SIZE;                  608                 end -= PMD_SIZE;
348         if (addr > end - 1)                       609         if (addr > end - 1)
349                 return;                           610                 return;
350         /*                                        611         /*
351          * We add page table cache pages with     612          * We add page table cache pages with PAGE_SIZE,
352          * (see pte_free_tlb()), flush the tlb    613          * (see pte_free_tlb()), flush the tlb if we need
353          */                                       614          */
354         tlb_change_page_size(tlb, PAGE_SIZE);  !! 615         tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
355         pgd = pgd_offset(tlb->mm, addr);          616         pgd = pgd_offset(tlb->mm, addr);
356         do {                                      617         do {
357                 next = pgd_addr_end(addr, end)    618                 next = pgd_addr_end(addr, end);
358                 if (pgd_none_or_clear_bad(pgd)    619                 if (pgd_none_or_clear_bad(pgd))
359                         continue;                 620                         continue;
360                 free_p4d_range(tlb, pgd, addr,    621                 free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
361         } while (pgd++, addr = next, addr != e    622         } while (pgd++, addr = next, addr != end);
362 }                                                 623 }
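
The clamping just above is easiest to see in isolation. Below is a minimal userspace sketch of the same floor/ceiling arithmetic, assuming a 2 MiB PMD purely for the demo; the helper name range_survives() is invented and is not kernel code.

#include <stdio.h>

#define PMD_SHIFT 21                    /* assumed: 2 MiB PMDs, demo only */
#define PMD_SIZE  (1UL << PMD_SHIFT)
#define PMD_MASK  (~(PMD_SIZE - 1))

/* Mirror the early-exit clamping of free_pgd_range(): return 1 if any
 * page-table range survives, 0 if the function would bail out early. */
static int range_survives(unsigned long addr, unsigned long end,
                          unsigned long floor, unsigned long ceiling)
{
        addr &= PMD_MASK;
        if (addr < floor) {
                addr += PMD_SIZE;
                if (!addr)              /* wrapped past the top of the address space */
                        return 0;
        }
        if (ceiling) {
                ceiling &= PMD_MASK;
                if (!ceiling)           /* "the opposite 0": rounded down to the bottom */
                        return 0;
        }
        /* end == 0 and ceiling == 0 both mean "top"; comparing the "- 1"
         * values keeps that case from looking like the bottom instead. */
        if (end - 1 > ceiling - 1)
                end -= PMD_SIZE;
        return addr <= end - 1;
}

int main(void)
{
        /* ceiling 0 means "top", so an ordinary range well below it survives. */
        printf("%d\n", range_survives(0x400000, 0x800000, 0, 0));
        /* ceiling 0x1000 rounds down to 0 under PMD_MASK: nothing to do. */
        printf("%d\n", range_survives(0, 0x1000, 0, 0x1000));
        return 0;
}
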
363                                                   624 
364 void free_pgtables(struct mmu_gather *tlb, str !! 625 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
365                    struct vm_area_struct *vma, !! 626                 unsigned long floor, unsigned long ceiling)
366                    unsigned long ceiling, bool << 
367 {                                                 627 {
368         struct unlink_vma_file_batch vb;       !! 628         while (vma) {
369                                                !! 629                 struct vm_area_struct *next = vma->vm_next;
370         do {                                   << 
371                 unsigned long addr = vma->vm_s    630                 unsigned long addr = vma->vm_start;
372                 struct vm_area_struct *next;   << 
373                                                << 
374                 /*                             << 
375                  * Note: USER_PGTABLES_CEILING << 
376                  * be 0.  This will underflow  << 
377                  */                            << 
378                 next = mas_find(mas, ceiling - << 
379                 if (unlikely(xa_is_zero(next)) << 
380                         next = NULL;           << 
381                                                   631 
382                 /*                                632                 /*
383                  * Hide vma from rmap and trun    633                  * Hide vma from rmap and truncate_pagecache before freeing
384                  * pgtables                       634                  * pgtables
385                  */                               635                  */
386                 if (mm_wr_locked)              << 
387                         vma_start_write(vma);  << 
388                 unlink_anon_vmas(vma);            636                 unlink_anon_vmas(vma);
                                                   >> 637                 unlink_file_vma(vma);
389                                                   638 
390                 if (is_vm_hugetlb_page(vma)) {    639                 if (is_vm_hugetlb_page(vma)) {
391                         unlink_file_vma(vma);  << 
392                         hugetlb_free_pgd_range    640                         hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
393                                 floor, next ?     641                                 floor, next ? next->vm_start : ceiling);
394                 } else {                          642                 } else {
395                         unlink_file_vma_batch_ << 
396                         unlink_file_vma_batch_ << 
397                                                << 
398                         /*                        643                         /*
399                          * Optimization: gathe    644                          * Optimization: gather nearby vmas into one call down
400                          */                       645                          */
401                         while (next && next->v    646                         while (next && next->vm_start <= vma->vm_end + PMD_SIZE
402                                && !is_vm_huget    647                                && !is_vm_hugetlb_page(next)) {
403                                 vma = next;       648                                 vma = next;
404                                 next = mas_fin !! 649                                 next = vma->vm_next;
405                                 if (unlikely(x << 
406                                         next = << 
407                                 if (mm_wr_lock << 
408                                         vma_st << 
409                                 unlink_anon_vm    650                                 unlink_anon_vmas(vma);
410                                 unlink_file_vm !! 651                                 unlink_file_vma(vma);
411                         }                         652                         }
412                         unlink_file_vma_batch_ << 
413                         free_pgd_range(tlb, ad    653                         free_pgd_range(tlb, addr, vma->vm_end,
414                                 floor, next ?     654                                 floor, next ? next->vm_start : ceiling);
415                 }                                 655                 }
416                 vma = next;                       656                 vma = next;
417         } while (vma);                         !! 657         }
418 }                                                 658 }
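
The batching loop above only descends into the page tables once for a run of VMAs that sit close enough together. A toy userspace model of that decision follows (the struct, helper name and 2 MiB PMD are invented for the demo, a 64-bit unsigned long is assumed, and the rmap/hugetlb handling is deliberately left out):

#include <stdio.h>

#define PMD_SIZE (1UL << 21)            /* assumed 2 MiB, demo only */

struct demo_vma {
        unsigned long vm_start, vm_end;
        struct demo_vma *vm_next;
};

/* One "free page tables" call per batch of VMAs whose start is within
 * PMD_SIZE of the previous vm_end, with the upper bound taken from the
 * next VMA's start or, failing that, the overall ceiling. */
static void batch_free(struct demo_vma *vma, unsigned long ceiling)
{
        while (vma) {
                struct demo_vma *next = vma->vm_next;
                unsigned long addr = vma->vm_start;

                while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
                        vma = next;
                        next = vma->vm_next;
                }
                printf("free [%#lx, %#lx), bounded above by %#lx\n",
                       addr, vma->vm_end, next ? next->vm_start : ceiling);
                vma = next;
        }
}

int main(void)
{
        struct demo_vma c = { 0x7f0000000000UL, 0x7f0000200000UL, NULL };
        struct demo_vma b = { 0x500000, 0x600000, &c };
        struct demo_vma a = { 0x200000, 0x400000, &b };

        batch_free(&a, ~0UL);           /* a and b share one call, c gets its own */
        return 0;
}
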
419                                                   659 
420 void pmd_install(struct mm_struct *mm, pmd_t * !! 660 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
421 {                                                 661 {
422         spinlock_t *ptl = pmd_lock(mm, pmd);   !! 662         spinlock_t *ptl;
                                                   >> 663         pgtable_t new = pte_alloc_one(mm, address);
                                                   >> 664         if (!new)
                                                   >> 665                 return -ENOMEM;
423                                                   666 
                                                   >> 667         /*
                                                   >> 668          * Ensure all pte setup (eg. pte page lock and page clearing) are
                                                   >> 669          * visible before the pte is made visible to other CPUs by being
                                                   >> 670          * put into page tables.
                                                   >> 671          *
                                                   >> 672          * The other side of the story is the pointer chasing in the page
                                                   >> 673          * table walking code (when walking the page table without locking;
                                                   >> 674          * ie. most of the time). Fortunately, these data accesses consist
                                                   >> 675          * of a chain of data-dependent loads, meaning most CPUs (alpha
                                                   >> 676          * being the notable exception) will already guarantee loads are
                                                   >> 677          * seen in-order. See the alpha page table accessors for the
                                                   >> 678          * smp_read_barrier_depends() barriers in page table walking code.
                                                   >> 679          */
                                                   >> 680         smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
                                                   >> 681 
                                                   >> 682         ptl = pmd_lock(mm, pmd);
424         if (likely(pmd_none(*pmd))) {   /* Has    683         if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
425                 mm_inc_nr_ptes(mm);               684                 mm_inc_nr_ptes(mm);
426                 /*                             !! 685                 pmd_populate(mm, pmd, new);
427                  * Ensure all pte setup (eg. p !! 686                 new = NULL;
428                  * visible before the pte is m << 
429                  * put into page tables.       << 
430                  *                             << 
431                  * The other side of the story << 
432                  * table walking code (when wa << 
433                  * ie. most of the time). Fort << 
434                  * of a chain of data-dependen << 
435                  * being the notable exception << 
436                  * seen in-order. See the alph << 
437                  * smp_rmb() barriers in page  << 
438                  */                            << 
439                 smp_wmb(); /* Could be smp_wmb << 
440                 pmd_populate(mm, pmd, *pte);   << 
441                 *pte = NULL;                   << 
442         }                                         687         }
443         spin_unlock(ptl);                         688         spin_unlock(ptl);
444 }                                              << 
445                                                << 
446 int __pte_alloc(struct mm_struct *mm, pmd_t *p << 
447 {                                              << 
448         pgtable_t new = pte_alloc_one(mm);     << 
449         if (!new)                              << 
450                 return -ENOMEM;                << 
451                                                << 
452         pmd_install(mm, pmd, &new);            << 
453         if (new)                                  689         if (new)
454                 pte_free(mm, new);                690                 pte_free(mm, new);
455         return 0;                                 691         return 0;
456 }                                                 692 }
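
pmd_install() and __pte_alloc() above follow an allocate-unlocked / install-under-lock / free-on-lost-race pattern. Here it is reduced to a generic userspace sketch; the lock, slot and helper names are invented, and a pthread mutex stands in for the page-table lock:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;                      /* stands in for the pmd entry */

int install_table(void)
{
        void *new = calloc(1, 4096);    /* "pte_alloc_one()" done without the lock */
        if (!new)
                return -1;              /* -ENOMEM in the kernel */

        pthread_mutex_lock(&slot_lock);
        if (!slot) {                    /* has another thread populated it? */
                /* The kernel issues a write barrier at this point so the
                 * zeroed table is visible before the pointer is published. */
                slot = new;
                new = NULL;
        }
        pthread_mutex_unlock(&slot_lock);

        free(new);                      /* lost the race: free(NULL) is a no-op */
        return 0;
}
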
457                                                   693 
458 int __pte_alloc_kernel(pmd_t *pmd)             !! 694 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
459 {                                                 695 {
460         pte_t *new = pte_alloc_one_kernel(&ini !! 696         pte_t *new = pte_alloc_one_kernel(&init_mm, address);
461         if (!new)                                 697         if (!new)
462                 return -ENOMEM;                   698                 return -ENOMEM;
463                                                   699 
                                                   >> 700         smp_wmb(); /* See comment in __pte_alloc */
                                                   >> 701 
464         spin_lock(&init_mm.page_table_lock);      702         spin_lock(&init_mm.page_table_lock);
465         if (likely(pmd_none(*pmd))) {   /* Has    703         if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
466                 smp_wmb(); /* See comment in p << 
467                 pmd_populate_kernel(&init_mm,     704                 pmd_populate_kernel(&init_mm, pmd, new);
468                 new = NULL;                       705                 new = NULL;
469         }                                         706         }
470         spin_unlock(&init_mm.page_table_lock);    707         spin_unlock(&init_mm.page_table_lock);
471         if (new)                                  708         if (new)
472                 pte_free_kernel(&init_mm, new)    709                 pte_free_kernel(&init_mm, new);
473         return 0;                                 710         return 0;
474 }                                                 711 }
475                                                   712 
476 static inline void init_rss_vec(int *rss)         713 static inline void init_rss_vec(int *rss)
477 {                                                 714 {
478         memset(rss, 0, sizeof(int) * NR_MM_COU    715         memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
479 }                                                 716 }
480                                                   717 
481 static inline void add_mm_rss_vec(struct mm_st    718 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
482 {                                                 719 {
483         int i;                                    720         int i;
484                                                   721 
                                                   >> 722         if (current->mm == mm)
                                                   >> 723                 sync_mm_rss(mm);
485         for (i = 0; i < NR_MM_COUNTERS; i++)      724         for (i = 0; i < NR_MM_COUNTERS; i++)
486                 if (rss[i])                       725                 if (rss[i])
487                         add_mm_counter(mm, i,     726                         add_mm_counter(mm, i, rss[i]);
488 }                                                 727 }
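
init_rss_vec()/add_mm_rss_vec() exist so the copy and zap loops can count pages into a cheap on-stack array and fold the totals into the mm once at the end. A self-contained sketch of that idiom, with types and names invented for the demo:

#include <string.h>

enum { DEMO_FILEPAGES, DEMO_ANONPAGES, DEMO_SWAPENTS, DEMO_NR_COUNTERS };

struct demo_mm { long counter[DEMO_NR_COUNTERS]; };

static inline void demo_init_rss_vec(int *rss)
{
        memset(rss, 0, sizeof(int) * DEMO_NR_COUNTERS);
}

static inline void demo_add_mm_rss_vec(struct demo_mm *mm, const int *rss)
{
        for (int i = 0; i < DEMO_NR_COUNTERS; i++)
                if (rss[i])
                        mm->counter[i] += rss[i];       /* one update per counter */
}

void demo_copy_range(struct demo_mm *mm, int nr_anon, int nr_file)
{
        int rss[DEMO_NR_COUNTERS];

        demo_init_rss_vec(rss);
        rss[DEMO_ANONPAGES] += nr_anon;         /* cheap local increments per page */
        rss[DEMO_FILEPAGES] += nr_file;
        demo_add_mm_rss_vec(mm, rss);           /* single flush at the end */
}
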
489                                                   728 
490 /*                                                729 /*
491  * This function is called to print an error w    730  * This function is called to print an error when a bad pte
492  * is found. For example, we might have a PFN-    731  * is found. For example, we might have a PFN-mapped pte in
493  * a region that doesn't allow it.                732  * a region that doesn't allow it.
494  *                                                733  *
495  * The calling function must still handle the     734  * The calling function must still handle the error.
496  */                                               735  */
497 static void print_bad_pte(struct vm_area_struc    736 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
498                           pte_t pte, struct pa    737                           pte_t pte, struct page *page)
499 {                                                 738 {
500         pgd_t *pgd = pgd_offset(vma->vm_mm, ad    739         pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
501         p4d_t *p4d = p4d_offset(pgd, addr);       740         p4d_t *p4d = p4d_offset(pgd, addr);
502         pud_t *pud = pud_offset(p4d, addr);       741         pud_t *pud = pud_offset(p4d, addr);
503         pmd_t *pmd = pmd_offset(pud, addr);       742         pmd_t *pmd = pmd_offset(pud, addr);
504         struct address_space *mapping;            743         struct address_space *mapping;
505         pgoff_t index;                            744         pgoff_t index;
506         static unsigned long resume;              745         static unsigned long resume;
507         static unsigned long nr_shown;            746         static unsigned long nr_shown;
508         static unsigned long nr_unshown;          747         static unsigned long nr_unshown;
509                                                   748 
510         /*                                        749         /*
511          * Allow a burst of 60 reports, then k    750          * Allow a burst of 60 reports, then keep quiet for that minute;
512          * or allow a steady drip of one repor    751          * or allow a steady drip of one report per second.
513          */                                       752          */
514         if (nr_shown == 60) {                     753         if (nr_shown == 60) {
515                 if (time_before(jiffies, resum    754                 if (time_before(jiffies, resume)) {
516                         nr_unshown++;             755                         nr_unshown++;
517                         return;                   756                         return;
518                 }                                 757                 }
519                 if (nr_unshown) {                 758                 if (nr_unshown) {
520                         pr_alert("BUG: Bad pag    759                         pr_alert("BUG: Bad page map: %lu messages suppressed\n",
521                                  nr_unshown);     760                                  nr_unshown);
522                         nr_unshown = 0;           761                         nr_unshown = 0;
523                 }                                 762                 }
524                 nr_shown = 0;                     763                 nr_shown = 0;
525         }                                         764         }
526         if (nr_shown++ == 0)                      765         if (nr_shown++ == 0)
527                 resume = jiffies + 60 * HZ;       766                 resume = jiffies + 60 * HZ;
528                                                   767 
529         mapping = vma->vm_file ? vma->vm_file-    768         mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
530         index = linear_page_index(vma, addr);     769         index = linear_page_index(vma, addr);
531                                                   770 
532         pr_alert("BUG: Bad page map in process    771         pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
533                  current->comm,                   772                  current->comm,
534                  (long long)pte_val(pte), (lon    773                  (long long)pte_val(pte), (long long)pmd_val(*pmd));
535         if (page)                                 774         if (page)
536                 dump_page(page, "bad pte");       775                 dump_page(page, "bad pte");
537         pr_alert("addr:%px vm_flags:%08lx anon !! 776         pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
538                  (void *)addr, vma->vm_flags,     777                  (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
539         pr_alert("file:%pD fault:%ps mmap:%ps  !! 778         pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n",
540                  vma->vm_file,                    779                  vma->vm_file,
541                  vma->vm_ops ? vma->vm_ops->fa    780                  vma->vm_ops ? vma->vm_ops->fault : NULL,
542                  vma->vm_file ? vma->vm_file->    781                  vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
543                  mapping ? mapping->a_ops->rea !! 782                  mapping ? mapping->a_ops->readpage : NULL);
544         dump_stack();                             783         dump_stack();
545         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_    784         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
546 }                                                 785 }
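
The throttling at the top of print_bad_pte() is a small state machine: a burst of 60 reports, then silence (while counting what was suppressed) until a minute has passed. A userspace sketch of the same logic, with time(NULL) standing in for jiffies and the function name invented:

#include <stdio.h>
#include <time.h>

int should_report(void)
{
        static time_t resume;
        static unsigned long nr_shown, nr_unshown;

        if (nr_shown == 60) {
                if (time(NULL) < resume) {      /* still inside the quiet minute */
                        nr_unshown++;
                        return 0;
                }
                if (nr_unshown) {
                        printf("%lu messages suppressed\n", nr_unshown);
                        nr_unshown = 0;
                }
                nr_shown = 0;                   /* start a fresh burst */
        }
        if (nr_shown++ == 0)
                resume = time(NULL) + 60;       /* "60 * HZ" in the kernel */
        return 1;
}
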
547                                                   786 
548 /*                                                787 /*
549  * vm_normal_page -- This function gets the "s    788  * vm_normal_page -- This function gets the "struct page" associated with a pte.
550  *                                                789  *
551  * "Special" mappings do not wish to be associ    790  * "Special" mappings do not wish to be associated with a "struct page" (either
552  * it doesn't exist, or it exists but they don    791  * it doesn't exist, or it exists but they don't want to touch it). In this
553  * case, NULL is returned here. "Normal" mappi    792  * case, NULL is returned here. "Normal" mappings do have a struct page.
554  *                                                793  *
555  * There are 2 broad cases. Firstly, an archit    794  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
556  * pte bit, in which case this function is tri    795  * pte bit, in which case this function is trivial. Secondly, an architecture
557  * may not have a spare pte bit, which require    796  * may not have a spare pte bit, which requires a more complicated scheme,
558  * described below.                               797  * described below.
559  *                                                798  *
560  * A raw VM_PFNMAP mapping (ie. one that is no    799  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
561  * special mapping (even if there are underlyi    800  * special mapping (even if there are underlying and valid "struct pages").
562  * COWed pages of a VM_PFNMAP are always norma    801  * COWed pages of a VM_PFNMAP are always normal.
563  *                                                802  *
564  * The way we recognize COWed pages within VM_    803  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
565  * rules set up by "remap_pfn_range()": the vm    804  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
566  * set, and the vm_pgoff will point to the fir    805  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
567  * mapping will always honor the rule             806  * mapping will always honor the rule
568  *                                                807  *
569  *      pfn_of_page == vma->vm_pgoff + ((addr     808  *      pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
570  *                                                809  *
571  * And for normal mappings this is false.         810  * And for normal mappings this is false.
572  *                                                811  *
573  * This restricts such mappings to be a linear    812  * This restricts such mappings to be a linear translation from virtual address
574  * to pfn. To get around this restriction, we     813  * to pfn. To get around this restriction, we allow arbitrary mappings so long
575  * as the vma is not a COW mapping; in that ca    814  * as the vma is not a COW mapping; in that case, we know that all ptes are
576  * special (because none can have been COWed).    815  * special (because none can have been COWed).
577  *                                                816  *
578  *                                                817  *
579  * In order to support COW of arbitrary specia    818  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
580  *                                                819  *
581  * VM_MIXEDMAP mappings can likewise contain m    820  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
582  * page" backing, however the difference is th    821  * page" backing, however the difference is that _all_ pages with a struct
583  * page (that is, those where pfn_valid is tru    822  * page (that is, those where pfn_valid is true) are refcounted and considered
584  * normal pages by the VM. The only exception  !! 823  * normal pages by the VM. The disadvantage is that pages are refcounted
585  * *never* refcounted.                         !! 824  * (which can be slower and simply not an option for some PFNMAP users). The
586  *                                             !! 825  * advantage is that we don't have to follow the strict linearity rule of
587  * The disadvantage is that pages are refcount !! 826  * PFNMAP mappings in order to support COWable mappings.
588  * simply not an option for some PFNMAP users) << 
589  * don't have to follow the strict linearity r << 
590  * order to support COWable mappings.          << 
591  *                                                827  *
592  */                                               828  */
593 struct page *vm_normal_page(struct vm_area_str !! 829 struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
594                             pte_t pte)         !! 830                              pte_t pte, bool with_public_device)
595 {                                                 831 {
596         unsigned long pfn = pte_pfn(pte);         832         unsigned long pfn = pte_pfn(pte);
597                                                   833 
598         if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPE    834         if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
599                 if (likely(!pte_special(pte)))    835                 if (likely(!pte_special(pte)))
600                         goto check_pfn;           836                         goto check_pfn;
601                 if (vma->vm_ops && vma->vm_ops    837                 if (vma->vm_ops && vma->vm_ops->find_special_page)
602                         return vma->vm_ops->fi    838                         return vma->vm_ops->find_special_page(vma, addr);
603                 if (vma->vm_flags & (VM_PFNMAP    839                 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
604                         return NULL;              840                         return NULL;
605                 if (is_zero_pfn(pfn))             841                 if (is_zero_pfn(pfn))
606                         return NULL;              842                         return NULL;
607                 if (pte_devmap(pte))           !! 843 
608                 /*                                844                 /*
609                  * NOTE: New users of ZONE_DEV !! 845                  * Device public pages are special pages (they are ZONE_DEVICE
610                  * and will have refcounts inc !! 846                  * pages but different from persistent memory). They behave
611                  * when they are inserted into !! 847                  * almost like normal pages. The difference is that they are
612                  * return here. Legacy ZONE_DE !! 848                  * not on the lru and thus should never be involved with any-
613                  * do not have refcounts. Exam !! 849                  * thing that involves lru manipulation (mlock, numa balancing,
614                  * MEMORY_DEVICE_FS_DAX type i !! 850                  * ...).
                                                   >> 851                  *
                                                   >> 852                  * This is why we still want to return NULL for such page from
                                                   >> 853                  * vm_normal_page() so that we do not have to special case all
                                                   >> 854                  * call site of vm_normal_page().
615                  */                               855                  */
616                         return NULL;           !! 856                 if (likely(pfn <= highest_memmap_pfn)) {
                                                   >> 857                         struct page *page = pfn_to_page(pfn);
617                                                   858 
                                                   >> 859                         if (is_device_public_page(page)) {
                                                   >> 860                                 if (with_public_device)
                                                   >> 861                                         return page;
                                                   >> 862                                 return NULL;
                                                   >> 863                         }
                                                   >> 864                 }
618                 print_bad_pte(vma, addr, pte,     865                 print_bad_pte(vma, addr, pte, NULL);
619                 return NULL;                      866                 return NULL;
620         }                                         867         }
621                                                   868 
622         /* !CONFIG_ARCH_HAS_PTE_SPECIAL case f    869         /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
623                                                   870 
624         if (unlikely(vma->vm_flags & (VM_PFNMA    871         if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
625                 if (vma->vm_flags & VM_MIXEDMA    872                 if (vma->vm_flags & VM_MIXEDMAP) {
626                         if (!pfn_valid(pfn))      873                         if (!pfn_valid(pfn))
627                                 return NULL;      874                                 return NULL;
628                         if (is_zero_pfn(pfn))  << 
629                                 return NULL;   << 
630                         goto out;                 875                         goto out;
631                 } else {                          876                 } else {
632                         unsigned long off;        877                         unsigned long off;
633                         off = (addr - vma->vm_    878                         off = (addr - vma->vm_start) >> PAGE_SHIFT;
634                         if (pfn == vma->vm_pgo    879                         if (pfn == vma->vm_pgoff + off)
635                                 return NULL;      880                                 return NULL;
636                         if (!is_cow_mapping(vm    881                         if (!is_cow_mapping(vma->vm_flags))
637                                 return NULL;      882                                 return NULL;
638                 }                                 883                 }
639         }                                         884         }
640                                                   885 
641         if (is_zero_pfn(pfn))                     886         if (is_zero_pfn(pfn))
642                 return NULL;                      887                 return NULL;
643                                                   888 
644 check_pfn:                                        889 check_pfn:
645         if (unlikely(pfn > highest_memmap_pfn)    890         if (unlikely(pfn > highest_memmap_pfn)) {
646                 print_bad_pte(vma, addr, pte,     891                 print_bad_pte(vma, addr, pte, NULL);
647                 return NULL;                      892                 return NULL;
648         }                                         893         }
649                                                   894 
650         /*                                        895         /*
651          * NOTE! We still have PageReserved()     896          * NOTE! We still have PageReserved() pages in the page tables.
652          * eg. VDSO mappings can cause them to    897          * eg. VDSO mappings can cause them to exist.
653          */                                       898          */
654 out:                                              899 out:
655         VM_WARN_ON_ONCE(is_zero_pfn(pfn));     << 
656         return pfn_to_page(pfn);                  900         return pfn_to_page(pfn);
657 }                                                 901 }
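
The vm_pgoff rule documented above, pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT), is the whole trick for spotting COWed pages inside a VM_PFNMAP mapping when there is no pte_special() bit. A minimal sketch of that check; the struct and names are invented and PAGE_SHIFT is assumed to be 12:

#include <stdbool.h>

#define DEMO_PAGE_SHIFT 12

struct demo_vma {
        unsigned long vm_start;
        unsigned long vm_pgoff;         /* first pfn mapped at vm_start */
};

bool demo_pfn_is_linear(const struct demo_vma *vma,
                        unsigned long addr, unsigned long pfn)
{
        unsigned long off = (addr - vma->vm_start) >> DEMO_PAGE_SHIFT;

        /* True for untouched PFNMAP ptes set up by remap_pfn_range(); a
         * COWed replacement page would land on an unrelated pfn and fail. */
        return pfn == vma->vm_pgoff + off;
}
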
658                                                   902 
659 struct folio *vm_normal_folio(struct vm_area_s !! 903 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
660                             pte_t pte)         << 
661 {                                              << 
662         struct page *page = vm_normal_page(vma << 
663                                                << 
664         if (page)                              << 
665                 return page_folio(page);       << 
666         return NULL;                           << 
667 }                                              << 
668                                                << 
669 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES          << 
670 struct page *vm_normal_page_pmd(struct vm_area    904 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
671                                 pmd_t pmd)        905                                 pmd_t pmd)
672 {                                                 906 {
673         unsigned long pfn = pmd_pfn(pmd);         907         unsigned long pfn = pmd_pfn(pmd);
674                                                   908 
675         /* Currently it's only used for huge p !! 909         /*
676         if (unlikely(pmd_special(pmd)))        !! 910          * There is no pmd_special() but there may be special pmds, e.g.
677                 return NULL;                   !! 911          * in a direct-access (dax) mapping, so let's just replicate the
678                                                !! 912          * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
                                                   >> 913          */
679         if (unlikely(vma->vm_flags & (VM_PFNMA    914         if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
680                 if (vma->vm_flags & VM_MIXEDMA    915                 if (vma->vm_flags & VM_MIXEDMAP) {
681                         if (!pfn_valid(pfn))      916                         if (!pfn_valid(pfn))
682                                 return NULL;      917                                 return NULL;
683                         goto out;                 918                         goto out;
684                 } else {                          919                 } else {
685                         unsigned long off;        920                         unsigned long off;
686                         off = (addr - vma->vm_    921                         off = (addr - vma->vm_start) >> PAGE_SHIFT;
687                         if (pfn == vma->vm_pgo    922                         if (pfn == vma->vm_pgoff + off)
688                                 return NULL;      923                                 return NULL;
689                         if (!is_cow_mapping(vm    924                         if (!is_cow_mapping(vma->vm_flags))
690                                 return NULL;      925                                 return NULL;
691                 }                                 926                 }
692         }                                         927         }
693                                                   928 
694         if (pmd_devmap(pmd))                   !! 929         if (is_zero_pfn(pfn))
695                 return NULL;                   << 
696         if (is_huge_zero_pmd(pmd))             << 
697                 return NULL;                      930                 return NULL;
698         if (unlikely(pfn > highest_memmap_pfn)    931         if (unlikely(pfn > highest_memmap_pfn))
699                 return NULL;                      932                 return NULL;
700                                                   933 
701         /*                                        934         /*
702          * NOTE! We still have PageReserved()     935          * NOTE! We still have PageReserved() pages in the page tables.
703          * eg. VDSO mappings can cause them to    936          * eg. VDSO mappings can cause them to exist.
704          */                                       937          */
705 out:                                              938 out:
706         return pfn_to_page(pfn);                  939         return pfn_to_page(pfn);
707 }                                                 940 }
708                                                << 
709 struct folio *vm_normal_folio_pmd(struct vm_ar << 
710                                   unsigned lon << 
711 {                                              << 
712         struct page *page = vm_normal_page_pmd << 
713                                                << 
714         if (page)                              << 
715                 return page_folio(page);       << 
716         return NULL;                           << 
717 }                                              << 
718 #endif                                            941 #endif
719                                                   942 
720 static void restore_exclusive_pte(struct vm_ar << 
721                                   struct page  << 
722                                   pte_t *ptep) << 
723 {                                              << 
724         struct folio *folio = page_folio(page) << 
725         pte_t orig_pte;                        << 
726         pte_t pte;                             << 
727         swp_entry_t entry;                     << 
728                                                << 
729         orig_pte = ptep_get(ptep);             << 
730         pte = pte_mkold(mk_pte(page, READ_ONCE << 
731         if (pte_swp_soft_dirty(orig_pte))      << 
732                 pte = pte_mksoft_dirty(pte);   << 
733                                                << 
734         entry = pte_to_swp_entry(orig_pte);    << 
735         if (pte_swp_uffd_wp(orig_pte))         << 
736                 pte = pte_mkuffd_wp(pte);      << 
737         else if (is_writable_device_exclusive_ << 
738                 pte = maybe_mkwrite(pte_mkdirt << 
739                                                << 
740         VM_BUG_ON_FOLIO(pte_write(pte) && (!fo << 
741                                            Pag << 
742                                                << 
743         /*                                     << 
744          * No need to take a page reference as << 
745          * created when the swap entry was mad << 
746          */                                    << 
747         if (folio_test_anon(folio))            << 
748                 folio_add_anon_rmap_pte(folio, << 
749         else                                   << 
750                 /*                             << 
751                  * Currently device exclusive  << 
752                  * memory so the entry shouldn << 
753                  */                            << 
754                 WARN_ON_ONCE(1);               << 
755                                                << 
756         set_pte_at(vma->vm_mm, address, ptep,  << 
757                                                << 
758         /*                                     << 
759          * No need to invalidate - it was non- << 
760          * secondary CPUs may have mappings th << 
761          */                                    << 
762         update_mmu_cache(vma, address, ptep);  << 
763 }                                              << 
764                                                << 
765 /*                                             << 
766  * Tries to restore an exclusive pte if the pa << 
767  * sleeping.                                   << 
768  */                                            << 
769 static int                                     << 
770 try_restore_exclusive_pte(pte_t *src_pte, stru << 
771                         unsigned long addr)    << 
772 {                                              << 
773         swp_entry_t entry = pte_to_swp_entry(p << 
774         struct page *page = pfn_swap_entry_to_ << 
775                                                << 
776         if (trylock_page(page)) {              << 
777                 restore_exclusive_pte(vma, pag << 
778                 unlock_page(page);             << 
779                 return 0;                      << 
780         }                                      << 
781                                                << 
782         return -EBUSY;                         << 
783 }                                              << 
784                                                << 
785 /*                                                943 /*
786  * copy one vm_area from one task to the other    944  * copy one vm_area from one task to the other. Assumes the page tables
787  * already present in the new task to be clear    945  * already present in the new task to be cleared in the whole range
788  * covered by this vma.                           946  * covered by this vma.
789  */                                               947  */
790                                                   948 
791 static unsigned long                           !! 949 static inline unsigned long
792 copy_nonpresent_pte(struct mm_struct *dst_mm,  !! 950 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
793                 pte_t *dst_pte, pte_t *src_pte !! 951                 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
794                 struct vm_area_struct *src_vma !! 952                 unsigned long addr, int *rss)
795 {                                              !! 953 {
796         unsigned long vm_flags = dst_vma->vm_f !! 954         unsigned long vm_flags = vma->vm_flags;
797         pte_t orig_pte = ptep_get(src_pte);    !! 955         pte_t pte = *src_pte;
798         pte_t pte = orig_pte;                  << 
799         struct folio *folio;                   << 
800         struct page *page;                        956         struct page *page;
801         swp_entry_t entry = pte_to_swp_entry(o << 
802                                                   957 
803         if (likely(!non_swap_entry(entry))) {  !! 958         /* pte contains position in swap or file, so copy. */
804                 if (swap_duplicate(entry) < 0) !! 959         if (unlikely(!pte_present(pte))) {
805                         return -EIO;           !! 960                 swp_entry_t entry = pte_to_swp_entry(pte);
806                                                !! 961 
807                 /* make sure dst_mm is on swap !! 962                 if (likely(!non_swap_entry(entry))) {
808                 if (unlikely(list_empty(&dst_m !! 963                         if (swap_duplicate(entry) < 0)
809                         spin_lock(&mmlist_lock !! 964                                 return entry.val;
810                         if (list_empty(&dst_mm !! 965 
811                                 list_add(&dst_ !! 966                         /* make sure dst_mm is on swapoff's mmlist. */
812                                                !! 967                         if (unlikely(list_empty(&dst_mm->mmlist))) {
813                         spin_unlock(&mmlist_lo !! 968                                 spin_lock(&mmlist_lock);
814                 }                              !! 969                                 if (list_empty(&dst_mm->mmlist))
815                 /* Mark the swap entry as shar !! 970                                         list_add(&dst_mm->mmlist,
816                 if (pte_swp_exclusive(orig_pte !! 971                                                         &src_mm->mmlist);
817                         pte = pte_swp_clear_ex !! 972                                 spin_unlock(&mmlist_lock);
818                         set_pte_at(src_mm, add !! 973                         }
819                 }                              !! 974                         rss[MM_SWAPENTS]++;
820                 rss[MM_SWAPENTS]++;            !! 975                 } else if (is_migration_entry(entry)) {
821         } else if (is_migration_entry(entry))  !! 976                         page = migration_entry_to_page(entry);
822                 folio = pfn_swap_entry_folio(e !! 977 
                                                   >> 978                         rss[mm_counter(page)]++;
823                                                   979 
824                 rss[mm_counter(folio)]++;      !! 980                         if (is_write_migration_entry(entry) &&
                                                   >> 981                                         is_cow_mapping(vm_flags)) {
                                                   >> 982                                 /*
                                                   >> 983                                  * COW mappings require pages in both
                                                   >> 984                                  * parent and child to be set to read.
                                                   >> 985                                  */
                                                   >> 986                                 make_migration_entry_read(&entry);
                                                   >> 987                                 pte = swp_entry_to_pte(entry);
                                                   >> 988                                 if (pte_swp_soft_dirty(*src_pte))
                                                   >> 989                                         pte = pte_swp_mksoft_dirty(pte);
                                                   >> 990                                 set_pte_at(src_mm, addr, src_pte, pte);
                                                   >> 991                         }
                                                   >> 992                 } else if (is_device_private_entry(entry)) {
                                                   >> 993                         page = device_private_entry_to_page(entry);
825                                                   994 
826                 if (!is_readable_migration_ent << 
827                                 is_cow_mapping << 
828                         /*                        995                         /*
829                          * COW mappings requir !! 996                          * Update rss count even for unaddressable pages, as
830                          * to be set to read.  !! 997                          * they should treated just like normal pages in this
831                          * now shared.         !! 998                          * respect.
                                                   >> 999                          *
                                                   >> 1000                          * We will likely want to have some new rss counters
                                                   >> 1001                          * for unaddressable pages, at some point. But for now
                                                   >> 1002                          * keep things as they are.
832                          */                       1003                          */
833                         entry = make_readable_ !! 1004                         get_page(page);
834                                                !! 1005                         rss[mm_counter(page)]++;
835                         pte = swp_entry_to_pte !! 1006                         page_dup_rmap(page, false);
836                         if (pte_swp_soft_dirty << 
837                                 pte = pte_swp_ << 
838                         if (pte_swp_uffd_wp(or << 
839                                 pte = pte_swp_ << 
840                         set_pte_at(src_mm, add << 
841                 }                              << 
842         } else if (is_device_private_entry(ent << 
843                 page = pfn_swap_entry_to_page( << 
844                 folio = page_folio(page);      << 
845                                                   1007 
846                 /*                             !! 1008                         /*
847                  * Update rss count even for u !! 1009                          * We do not preserve soft-dirty information, because so
848                  * they should be treated just !! 997                          * they should be treated just like normal pages in this
849                  * respect.                    !! 1011                          * requires that. And checkpoint/restore does not work
850                  *                             !! 1012                          * when a device driver is involved (you cannot easily
851                  * We will likely want to have !! 1013                          * save and restore device driver state).
852                  * for unaddressable pages, at !! 1014                          */
853                  * keep things as they are.    !! 1015                         if (is_write_device_private_entry(entry) &&
854                  */                            !! 1016                             is_cow_mapping(vm_flags)) {
855                 folio_get(folio);              !! 1017                                 make_device_private_entry_read(&entry);
856                 rss[mm_counter(folio)]++;      !! 1018                                 pte = swp_entry_to_pte(entry);
857                 /* Cannot fail as these pages  !! 1019                                 set_pte_at(src_mm, addr, src_pte, pte);
858                 folio_try_dup_anon_rmap_pte(fo !! 1020                         }
859                                                << 
860                 /*                             << 
861                  * We do not preserve soft-dir << 
862                  * far, checkpoint/restore is  << 
863                  * requires that. And checkpoi << 
864                  * when a device driver is inv << 
865                  * save and restore device dri << 
866                  */                            << 
867                 if (is_writable_device_private << 
868                     is_cow_mapping(vm_flags))  << 
869                         entry = make_readable_ << 
870                                                << 
871                         pte = swp_entry_to_pte << 
872                         if (pte_swp_uffd_wp(or << 
873                                 pte = pte_swp_ << 
874                         set_pte_at(src_mm, add << 
875                 }                                 1021                 }
876         } else if (is_device_exclusive_entry(e !! 1022                 goto out_set_pte;
877                 /*                             << 
878                  * Make device exclusive entri << 
879                  * original entry then copying << 
880                  * exclusive entries currently << 
881                  * (ie. COW) mappings.         << 
882                  */                            << 
883                 VM_BUG_ON(!is_cow_mapping(src_ << 
884                 if (try_restore_exclusive_pte( << 
885                         return -EBUSY;         << 
886                 return -ENOENT;                << 
887         } else if (is_pte_marker_entry(entry)) << 
888                 pte_marker marker = copy_pte_m << 
889                                                << 
890                 if (marker)                    << 
891                         set_pte_at(dst_mm, add << 
892                                    make_pte_ma << 
893                 return 0;                      << 
894         }                                         1023         }
895         if (!userfaultfd_wp(dst_vma))          << 
896                 pte = pte_swp_clear_uffd_wp(pt << 
897         set_pte_at(dst_mm, addr, dst_pte, pte) << 
898         return 0;                              << 
899 }                                              << 
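
For the migration and device-private cases handled above, the COW rule boils down to: if the source entry is writable and the mapping is COW, downgrade it to a read-only entry, write that back into the parent, and give the child the same read-only copy, so both must fault before writing. A toy model with an invented bit layout (not the kernel's swp_entry_t encoding):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_ENTRY_WRITE        (1u << 0)

struct demo_pte { uint32_t swp; };

void demo_copy_nonpresent(struct demo_pte *dst, struct demo_pte *src,
                          bool is_cow)
{
        uint32_t entry = src->swp;

        if (is_cow && (entry & DEMO_ENTRY_WRITE)) {
                entry &= ~DEMO_ENTRY_WRITE;     /* "make the entry readable" */
                src->swp = entry;               /* parent loses write access too */
        }
        dst->swp = entry;                       /* child gets the read-only copy */
}
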
900                                                << 
901 /*                                             << 
902  * Copy a present and normal page.             << 
903  *                                             << 
904  * NOTE! The usual case is that this isn't req << 
905  * instead, the caller can just increase the p << 
906  * and re-use the pte the traditional way.     << 
907  *                                             << 
908  * And if we need a pre-allocated page but don << 
909  * one, return a negative error to let the pre << 
910  * code know so that it can do so outside the  << 
911  * lock.                                       << 
912  */                                            << 
913 static inline int                              << 
914 copy_present_page(struct vm_area_struct *dst_v << 
915                   pte_t *dst_pte, pte_t *src_p << 
916                   struct folio **prealloc, str << 
917 {                                              << 
918         struct folio *new_folio;               << 
919         pte_t pte;                             << 
920                                                   1024 
921         new_folio = *prealloc;                 !! 1025         /*
922         if (!new_folio)                        !! 1026          * If it's a COW mapping, write protect it both
923                 return -EAGAIN;                !! 1027          * in the parent and the child
924                                                !! 1028          */
925         /*                                     !! 1029         if (is_cow_mapping(vm_flags)) {
926          * We have a prealloc page, all good!  !! 1030                 ptep_set_wrprotect(src_mm, addr, src_pte);
927          * over and copy the page & arm it.    << 
928          */                                    << 
929                                                << 
930         if (copy_mc_user_highpage(&new_folio-> << 
931                 return -EHWPOISON;             << 
932                                                << 
933         *prealloc = NULL;                      << 
934         __folio_mark_uptodate(new_folio);      << 
935         folio_add_new_anon_rmap(new_folio, dst << 
936         folio_add_lru_vma(new_folio, dst_vma); << 
937         rss[MM_ANONPAGES]++;                   << 
938                                                << 
939         /* All done, just insert the new page  << 
940         pte = mk_pte(&new_folio->page, dst_vma << 
941         pte = maybe_mkwrite(pte_mkdirty(pte),  << 
942         if (userfaultfd_pte_wp(dst_vma, ptep_g << 
943                 /* Uffd-wp needs to be deliver << 
944                 pte = pte_mkuffd_wp(pte);      << 
945         set_pte_at(dst_vma->vm_mm, addr, dst_p << 
946         return 0;                              << 
947 }                                              << 
948                                                << 
949 static __always_inline void __copy_present_pte << 
950                 struct vm_area_struct *src_vma << 
951                 pte_t pte, unsigned long addr, << 
952 {                                              << 
953         struct mm_struct *src_mm = src_vma->vm << 
954                                                << 
955         /* If it's a COW mapping, write protec << 
956         if (is_cow_mapping(src_vma->vm_flags)  << 
957                 wrprotect_ptes(src_mm, addr, s << 
958                 pte = pte_wrprotect(pte);         1031                 pte = pte_wrprotect(pte);
959         }                                         1032         }
960                                                   1033 
961         /* If it's a shared mapping, mark it c !! 1034         /*
962         if (src_vma->vm_flags & VM_SHARED)     !! 1035          * If it's a shared mapping, mark it clean in
                                                   >> 1036          * the child
                                                   >> 1037          */
                                                   >> 1038         if (vm_flags & VM_SHARED)
963                 pte = pte_mkclean(pte);           1039                 pte = pte_mkclean(pte);
964         pte = pte_mkold(pte);                     1040         pte = pte_mkold(pte);
965                                                   1041 
966         if (!userfaultfd_wp(dst_vma))          !! 1042         page = vm_normal_page(vma, addr, pte);
967                 pte = pte_clear_uffd_wp(pte);  !! 1043         if (page) {
                                                   >> 1044                 get_page(page);
                                                   >> 1045                 page_dup_rmap(page, false);
                                                   >> 1046                 rss[mm_counter(page)]++;
                                                   >> 1047         } else if (pte_devmap(pte)) {
                                                   >> 1048                 page = pte_page(pte);
968                                                   1049 
969         set_ptes(dst_vma->vm_mm, addr, dst_pte << 
970 }                                              << 
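
The flag handling just above has three steps: write-protect both sides of a COW mapping, hand a shared mapping's pte to the child clean, and always clear the accessed bit. The same steps as a toy sketch with invented flag bits (not the real pte layout, and the uffd-wp handling is omitted):

#include <stdint.h>
#include <stdbool.h>

#define DEMO_PTE_WRITE  (1u << 0)
#define DEMO_PTE_DIRTY  (1u << 1)
#define DEMO_PTE_YOUNG  (1u << 2)

uint32_t demo_copy_present(uint32_t *parent_pte, bool is_cow, bool is_shared)
{
        uint32_t pte = *parent_pte;

        if (is_cow) {
                *parent_pte &= ~DEMO_PTE_WRITE; /* write-protect the source too */
                pte &= ~DEMO_PTE_WRITE;
        }
        if (is_shared)
                pte &= ~DEMO_PTE_DIRTY;         /* child copy starts out clean */
        pte &= ~DEMO_PTE_YOUNG;                 /* and unreferenced */

        return pte;                             /* value to install in the child */
}
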
971                                                << 
972 /*                                             << 
973  * Copy one present PTE, trying to batch-proce << 
974  * consecutive pages of the same folio by copy << 
975  *                                             << 
976  * Returns -EAGAIN if one preallocated page is << 
977  * Otherwise, returns the number of copied PTE << 
978  */                                            << 
979 static inline int                              << 
980 copy_present_ptes(struct vm_area_struct *dst_v << 
981                  pte_t *dst_pte, pte_t *src_pt << 
982                  int max_nr, int *rss, struct  << 
983 {                                              << 
984         struct page *page;                     << 
985         struct folio *folio;                   << 
986         bool any_writable;                     << 
987         fpb_t flags = 0;                       << 
988         int err, nr;                           << 
989                                                << 
990         page = vm_normal_page(src_vma, addr, p << 
991         if (unlikely(!page))                   << 
992                 goto copy_pte;                 << 
993                                                << 
994         folio = page_folio(page);              << 
995                                                << 
996         /*                                     << 
997          * If we likely have to copy, just don << 
998          * sure that the common "small folio"  << 
999          * by keeping the batching logic separ << 
1000          */                                   << 
1001         if (unlikely(!*prealloc && folio_test << 
1002                 if (src_vma->vm_flags & VM_SH << 
1003                         flags |= FPB_IGNORE_D << 
1004                 if (!vma_soft_dirty_enabled(s << 
1005                         flags |= FPB_IGNORE_S << 
1006                                               << 
1007                 nr = folio_pte_batch(folio, a << 
1008                                      &any_wri << 
1009                 folio_ref_add(folio, nr);     << 
1010                 if (folio_test_anon(folio)) { << 
1011                         if (unlikely(folio_tr << 
1012                                               << 
1013                                 folio_ref_sub << 
1014                                 return -EAGAI << 
1015                         }                     << 
1016                         rss[MM_ANONPAGES] +=  << 
1017                         VM_WARN_ON_FOLIO(Page << 
1018                 } else {                      << 
1019                         folio_dup_file_rmap_p << 
1020                         rss[mm_counter_file(f << 
1021                 }                             << 
1022                 if (any_writable)             << 
1023                         pte = pte_mkwrite(pte << 
1024                 __copy_present_ptes(dst_vma,  << 
1025                                     addr, nr) << 
1026                 return nr;                    << 
1027         }                                     << 
1028                                               << 
1029         folio_get(folio);                     << 
1030         if (folio_test_anon(folio)) {         << 
1031                 /*                               1050                 /*
1032                  * If this page may have been !! 1051                  * Cache coherent device memory behave like regular page and
1033                  * copy the page immediately  !! 1052                  * not like persistent memory page. For more informations see
1034                  * guarantee the pinned page  !! 1053                  * MEMORY_DEVICE_CACHE_COHERENT in memory_hotplug.h
1035                  * future.                    << 
1036                  */                              1054                  */
1037                 if (unlikely(folio_try_dup_an !! 1055                 if (is_device_public_page(page)) {
1038                         /* Page may be pinned !! 1056                         get_page(page);
1039                         folio_put(folio);     !! 1057                         page_dup_rmap(page, false);
1040                         err = copy_present_pa !! 1058                         rss[mm_counter(page)]++;
1041                                               << 
1042                         return err ? err : 1; << 
1043                 }                                1059                 }
1044                 rss[MM_ANONPAGES]++;          << 
1045                 VM_WARN_ON_FOLIO(PageAnonExcl << 
1046         } else {                              << 
1047                 folio_dup_file_rmap_pte(folio << 
1048                 rss[mm_counter_file(folio)]++ << 
1049         }                                     << 
1050                                               << 
1051 copy_pte:                                     << 
1052         __copy_present_ptes(dst_vma, src_vma, << 
1053         return 1;                             << 
1054 }                                             << 
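
copy_present_ptes() batches runs of PTEs that map consecutive pages of the same large folio so that the refcount and rmap updates can be done once per run instead of once per PTE. A toy model of that run detection (illustrative sketch only; the real folio_pte_batch() also has to respect writable, dirty and soft-dirty bits, which this ignores):

	/* pte_batch_toy.c - sketch of the "batch consecutive pages" idea. */
	#include <stddef.h>
	#include <stdio.h>

	/*
	 * Return how many leading entries of pfns[0..max_nr) form a run of
	 * consecutive page frame numbers starting at pfns[0], i.e. how many
	 * entries could be handled by one batched update.
	 */
	static size_t batch_len(const unsigned long *pfns, size_t max_nr)
	{
		size_t nr = 1;

		while (nr < max_nr && pfns[nr] == pfns[0] + nr)
			nr++;
		return nr;
	}

	int main(void)
	{
		unsigned long pfns[] = { 100, 101, 102, 103, 200, 201 };

		/* Prints 4: the first four entries are consecutive. */
		printf("batch of %zu\n",
		       batch_len(pfns, sizeof(pfns) / sizeof(pfns[0])));
		return 0;
	}
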
1055                                               << 
1056 static inline struct folio *folio_prealloc(st << 
1057                 struct vm_area_struct *vma, u << 
1058 {                                             << 
1059         struct folio *new_folio;              << 
1060                                               << 
1061         if (need_zero)                        << 
1062                 new_folio = vma_alloc_zeroed_ << 
1063         else                                  << 
1064                 new_folio = vma_alloc_folio(G << 
1065                                             a << 
1066                                               << 
1067         if (!new_folio)                       << 
1068                 return NULL;                  << 
1069                                               << 
1070         if (mem_cgroup_charge(new_folio, src_ << 
1071                 folio_put(new_folio);         << 
1072                 return NULL;                  << 
1073         }                                        1060         }
1074         folio_throttle_swaprate(new_folio, GF << 
1075                                                  1061 
1076         return new_folio;                     !! 1062 out_set_pte:
                                                   >> 1063         set_pte_at(dst_mm, addr, dst_pte, pte);
                                                   >> 1064         return 0;
1077 }                                                1065 }
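
folio_prealloc() exists so that copy_pte_range() can allocate with GFP_KERNEL while no page-table locks are held, then come back and retry the entry that needed the page. The same drop-the-lock / allocate / retry shape in plain user-space C (an illustrative sketch; copy_one_entry() and the mutex are stand-ins, not kernel interfaces):

	/* prealloc_retry.c - sketch: allocate outside the lock, then retry. */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

	/*
	 * Toy stand-in for copy_present_ptes(): entry 3 "needs" a
	 * preallocated buffer (think: a pinned page that must be copied
	 * immediately).  Returns nonzero when a buffer is required but
	 * none was supplied, asking the caller to allocate and retry.
	 */
	static int copy_one_entry(int i, void **spare)
	{
		if (i == 3) {
			if (!*spare)
				return -1;
			free(*spare);		/* "use" the prealloc buffer */
			*spare = NULL;
		}
		return 0;
	}

	int main(void)
	{
		void *spare = NULL;
		int i = 0, n = 8;

		while (i < n) {
			pthread_mutex_lock(&table_lock);
			for (; i < n; i++)
				if (copy_one_entry(i, &spare))
					break;	/* must drop the lock first */
			pthread_mutex_unlock(&table_lock);

			if (i < n) {
				/* Sleeping allocation: only with the lock dropped. */
				spare = malloc(4096);
				if (!spare)
					return 1;
			}
		}
		free(spare);
		printf("copied %d entries\n", i);
		return 0;
	}
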
1078                                                  1066 
1079 static int                                    !! 1067 static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1080 copy_pte_range(struct vm_area_struct *dst_vma !! 1068                    pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
1081                pmd_t *dst_pmd, pmd_t *src_pmd !! 1069                    unsigned long addr, unsigned long end)
1082                unsigned long end)             << 
1083 {                                                1070 {
1084         struct mm_struct *dst_mm = dst_vma->v << 
1085         struct mm_struct *src_mm = src_vma->v << 
1086         pte_t *orig_src_pte, *orig_dst_pte;      1071         pte_t *orig_src_pte, *orig_dst_pte;
1087         pte_t *src_pte, *dst_pte;                1072         pte_t *src_pte, *dst_pte;
1088         pte_t ptent;                          << 
1089         spinlock_t *src_ptl, *dst_ptl;           1073         spinlock_t *src_ptl, *dst_ptl;
1090         int progress, max_nr, ret = 0;        !! 1074         int progress = 0;
1091         int rss[NR_MM_COUNTERS];                 1075         int rss[NR_MM_COUNTERS];
1092         swp_entry_t entry = (swp_entry_t){0};    1076         swp_entry_t entry = (swp_entry_t){0};
1093         struct folio *prealloc = NULL;        << 
1094         int nr;                               << 
1095                                                  1077 
1096 again:                                           1078 again:
1097         progress = 0;                         << 
1098         init_rss_vec(rss);                       1079         init_rss_vec(rss);
1099                                                  1080 
1100         /*                                    << 
1101          * copy_pmd_range()'s prior pmd_none_ << 
1102          * error handling here, assume that e << 
1103          * protects anon from unexpected THP  << 
1104          * protected by mmap_lock-less collap << 
1105          * (whereas vma_needs_copy() skips ar << 
1106          * can remove such assumptions later, << 
1107          */                                   << 
1108         dst_pte = pte_alloc_map_lock(dst_mm,     1081         dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
1109         if (!dst_pte) {                       !! 1082         if (!dst_pte)
1110                 ret = -ENOMEM;                !! 1083                 return -ENOMEM;
1111                 goto out;                     !! 1084         src_pte = pte_offset_map(src_pmd, addr);
1112         }                                     !! 1085         src_ptl = pte_lockptr(src_mm, src_pmd);
1113         src_pte = pte_offset_map_nolock(src_m << 
1114         if (!src_pte) {                       << 
1115                 pte_unmap_unlock(dst_pte, dst << 
1116                 /* ret == 0 */                << 
1117                 goto out;                     << 
1118         }                                     << 
1119         spin_lock_nested(src_ptl, SINGLE_DEPT    1086         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1120         orig_src_pte = src_pte;                  1087         orig_src_pte = src_pte;
1121         orig_dst_pte = dst_pte;                  1088         orig_dst_pte = dst_pte;
1122         arch_enter_lazy_mmu_mode();              1089         arch_enter_lazy_mmu_mode();
1123                                                  1090 
1124         do {                                     1091         do {
1125                 nr = 1;                       << 
1126                                               << 
1127                 /*                               1092                 /*
1128                  * We are holding two locks a    1093                  * We are holding two locks at this point - either of them
1129                  * could generate latencies i    1094                  * could generate latencies in another task on another CPU.
1130                  */                              1095                  */
1131                 if (progress >= 32) {            1096                 if (progress >= 32) {
1132                         progress = 0;            1097                         progress = 0;
1133                         if (need_resched() ||    1098                         if (need_resched() ||
1134                             spin_needbreak(sr    1099                             spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
1135                                 break;           1100                                 break;
1136                 }                                1101                 }
1137                 ptent = ptep_get(src_pte);    !! 1102                 if (pte_none(*src_pte)) {
1138                 if (pte_none(ptent)) {        << 
1139                         progress++;              1103                         progress++;
1140                         continue;                1104                         continue;
1141                 }                                1105                 }
1142                 if (unlikely(!pte_present(pte !! 1106                 entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
1143                         ret = copy_nonpresent !! 1107                                                         vma, addr, rss);
1144                                               !! 1108                 if (entry.val)
1145                                               << 
1146                                               << 
1147                         if (ret == -EIO) {    << 
1148                                 entry = pte_t << 
1149                                 break;        << 
1150                         } else if (ret == -EB << 
1151                                 break;        << 
1152                         } else if (!ret) {    << 
1153                                 progress += 8 << 
1154                                 continue;     << 
1155                         }                     << 
1156                         ptent = ptep_get(src_ << 
1157                         VM_WARN_ON_ONCE(!pte_ << 
1158                                               << 
1159                         /*                    << 
1160                          * Device exclusive e << 
1161                          * the now present pt << 
1162                          */                   << 
1163                         WARN_ON_ONCE(ret != - << 
1164                 }                             << 
1165                 /* copy_present_ptes() will c << 
1166                 max_nr = (end - addr) / PAGE_ << 
1167                 ret = copy_present_ptes(dst_v << 
1168                                         ptent << 
1169                 /*                            << 
1170                  * If we need a pre-allocated << 
1171                  * locks, allocate, and try a << 
1172                  * If copy failed due to hwpo << 
1173                  */                           << 
1174                 if (unlikely(ret == -EAGAIN | << 
1175                         break;                   1109                         break;
1176                 if (unlikely(prealloc)) {     !! 1110                 progress += 8;
1177                         /*                    !! 1111         } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
1178                          * pre-alloc page can << 
1179                          * to strictly follow << 
1180                          * will allocate page << 
1181                          * could only happen  << 
1182                          */                   << 
1183                         folio_put(prealloc);  << 
1184                         prealloc = NULL;      << 
1185                 }                             << 
1186                 nr = ret;                     << 
1187                 progress += 8 * nr;           << 
1188         } while (dst_pte += nr, src_pte += nr << 
1189                  addr != end);                << 
1190                                                  1112 
1191         arch_leave_lazy_mmu_mode();              1113         arch_leave_lazy_mmu_mode();
1192         pte_unmap_unlock(orig_src_pte, src_pt !! 1114         spin_unlock(src_ptl);
                                                   >> 1115         pte_unmap(orig_src_pte);
1193         add_mm_rss_vec(dst_mm, rss);             1116         add_mm_rss_vec(dst_mm, rss);
1194         pte_unmap_unlock(orig_dst_pte, dst_pt    1117         pte_unmap_unlock(orig_dst_pte, dst_ptl);
1195         cond_resched();                          1118         cond_resched();
1196                                                  1119 
1197         if (ret == -EIO) {                    !! 1120         if (entry.val) {
1198                 VM_WARN_ON_ONCE(!entry.val);  !! 1121                 if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
1199                 if (add_swap_count_continuati << 
1200                         ret = -ENOMEM;        << 
1201                         goto out;             << 
1202                 }                             << 
1203                 entry.val = 0;                << 
1204         } else if (ret == -EBUSY || unlikely( << 
1205                 goto out;                     << 
1206         } else if (ret ==  -EAGAIN) {         << 
1207                 prealloc = folio_prealloc(src << 
1208                 if (!prealloc)                << 
1209                         return -ENOMEM;          1122                         return -ENOMEM;
1210         } else if (ret < 0) {                 !! 1123                 progress = 0;
1211                 VM_WARN_ON_ONCE(1);           << 
1212         }                                        1124         }
1213                                               << 
1214         /* We've captured and resolved the er << 
1215         ret = 0;                              << 
1216                                               << 
1217         if (addr != end)                         1125         if (addr != end)
1218                 goto again;                      1126                 goto again;
1219 out:                                          !! 1127         return 0;
1220         if (unlikely(prealloc))               << 
1221                 folio_put(prealloc);          << 
1222         return ret;                           << 
1223 }                                                1128 }
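
Besides the preallocation retry, copy_pte_range() bounds how long it holds the two page-table locks: every 32 entries it checks need_resched() and spin_needbreak() and, if necessary, drops the locks and starts again from where it left off. A user-space sketch of that lock-break pattern (illustrative only; a pthread mutex and sched_yield() stand in for the spinlocks and the scheduler checks):

	/* lock_break.c - sketch: bound lock hold time in a long copy loop. */
	#include <pthread.h>
	#include <sched.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	int main(void)
	{
		long copied = 0, total = 1000000;
		int progress = 0;

		while (copied < total) {
			pthread_mutex_lock(&lock);
			while (copied < total) {
				if (progress++ >= 32) {
					/* As in copy_pte_range(): after a batch
					 * of entries, give waiters a chance. */
					progress = 0;
					break;
				}
				copied++;	/* "copy" one entry */
			}
			pthread_mutex_unlock(&lock);
			sched_yield();
		}
		printf("copied %ld entries\n", copied);
		return 0;
	}
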
1224                                                  1129 
1225 static inline int                             !! 1130 static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1226 copy_pmd_range(struct vm_area_struct *dst_vma !! 1131                 pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
1227                pud_t *dst_pud, pud_t *src_pud !! 1132                 unsigned long addr, unsigned long end)
1228                unsigned long end)             << 
1229 {                                                1133 {
1230         struct mm_struct *dst_mm = dst_vma->v << 
1231         struct mm_struct *src_mm = src_vma->v << 
1232         pmd_t *src_pmd, *dst_pmd;                1134         pmd_t *src_pmd, *dst_pmd;
1233         unsigned long next;                      1135         unsigned long next;
1234                                                  1136 
1235         dst_pmd = pmd_alloc(dst_mm, dst_pud,     1137         dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1236         if (!dst_pmd)                            1138         if (!dst_pmd)
1237                 return -ENOMEM;                  1139                 return -ENOMEM;
1238         src_pmd = pmd_offset(src_pud, addr);     1140         src_pmd = pmd_offset(src_pud, addr);
1239         do {                                     1141         do {
1240                 next = pmd_addr_end(addr, end    1142                 next = pmd_addr_end(addr, end);
1241                 if (is_swap_pmd(*src_pmd) ||     1143                 if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1242                         || pmd_devmap(*src_pm    1144                         || pmd_devmap(*src_pmd)) {
1243                         int err;                 1145                         int err;
1244                         VM_BUG_ON_VMA(next-ad !! 1146                         VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma);
1245                         err = copy_huge_pmd(d !! 1147                         err = copy_huge_pmd(dst_mm, src_mm,
1246                                             a !! 1148                                             dst_pmd, src_pmd, addr, vma);
1247                         if (err == -ENOMEM)      1149                         if (err == -ENOMEM)
1248                                 return -ENOME    1150                                 return -ENOMEM;
1249                         if (!err)                1151                         if (!err)
1250                                 continue;        1152                                 continue;
1251                         /* fall through */       1153                         /* fall through */
1252                 }                                1154                 }
1253                 if (pmd_none_or_clear_bad(src    1155                 if (pmd_none_or_clear_bad(src_pmd))
1254                         continue;                1156                         continue;
1255                 if (copy_pte_range(dst_vma, s !! 1157                 if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
1256                                    addr, next !! 1158                                                 vma, addr, next))
1257                         return -ENOMEM;          1159                         return -ENOMEM;
1258         } while (dst_pmd++, src_pmd++, addr =    1160         } while (dst_pmd++, src_pmd++, addr = next, addr != end);
1259         return 0;                                1161         return 0;
1260 }                                                1162 }
1261                                                  1163 
1262 static inline int                             !! 1164 static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1263 copy_pud_range(struct vm_area_struct *dst_vma !! 1165                 p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
1264                p4d_t *dst_p4d, p4d_t *src_p4d !! 1166                 unsigned long addr, unsigned long end)
1265                unsigned long end)             << 
1266 {                                                1167 {
1267         struct mm_struct *dst_mm = dst_vma->v << 
1268         struct mm_struct *src_mm = src_vma->v << 
1269         pud_t *src_pud, *dst_pud;                1168         pud_t *src_pud, *dst_pud;
1270         unsigned long next;                      1169         unsigned long next;
1271                                                  1170 
1272         dst_pud = pud_alloc(dst_mm, dst_p4d,     1171         dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1273         if (!dst_pud)                            1172         if (!dst_pud)
1274                 return -ENOMEM;                  1173                 return -ENOMEM;
1275         src_pud = pud_offset(src_p4d, addr);     1174         src_pud = pud_offset(src_p4d, addr);
1276         do {                                     1175         do {
1277                 next = pud_addr_end(addr, end    1176                 next = pud_addr_end(addr, end);
1278                 if (pud_trans_huge(*src_pud)     1177                 if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1279                         int err;                 1178                         int err;
1280                                                  1179 
1281                         VM_BUG_ON_VMA(next-ad !! 1180                         VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma);
1282                         err = copy_huge_pud(d    1181                         err = copy_huge_pud(dst_mm, src_mm,
1283                                             d !! 1182                                             dst_pud, src_pud, addr, vma);
1284                         if (err == -ENOMEM)      1183                         if (err == -ENOMEM)
1285                                 return -ENOME    1184                                 return -ENOMEM;
1286                         if (!err)                1185                         if (!err)
1287                                 continue;        1186                                 continue;
1288                         /* fall through */       1187                         /* fall through */
1289                 }                                1188                 }
1290                 if (pud_none_or_clear_bad(src    1189                 if (pud_none_or_clear_bad(src_pud))
1291                         continue;                1190                         continue;
1292                 if (copy_pmd_range(dst_vma, s !! 1191                 if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
1293                                    addr, next !! 1192                                                 vma, addr, next))
1294                         return -ENOMEM;          1193                         return -ENOMEM;
1295         } while (dst_pud++, src_pud++, addr =    1194         } while (dst_pud++, src_pud++, addr = next, addr != end);
1296         return 0;                                1195         return 0;
1297 }                                                1196 }
1298                                                  1197 
1299 static inline int                             !! 1198 static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1300 copy_p4d_range(struct vm_area_struct *dst_vma !! 1199                 pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
1301                pgd_t *dst_pgd, pgd_t *src_pgd !! 1200                 unsigned long addr, unsigned long end)
1302                unsigned long end)             << 
1303 {                                                1201 {
1304         struct mm_struct *dst_mm = dst_vma->v << 
1305         p4d_t *src_p4d, *dst_p4d;                1202         p4d_t *src_p4d, *dst_p4d;
1306         unsigned long next;                      1203         unsigned long next;
1307                                                  1204 
1308         dst_p4d = p4d_alloc(dst_mm, dst_pgd,     1205         dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1309         if (!dst_p4d)                            1206         if (!dst_p4d)
1310                 return -ENOMEM;                  1207                 return -ENOMEM;
1311         src_p4d = p4d_offset(src_pgd, addr);     1208         src_p4d = p4d_offset(src_pgd, addr);
1312         do {                                     1209         do {
1313                 next = p4d_addr_end(addr, end    1210                 next = p4d_addr_end(addr, end);
1314                 if (p4d_none_or_clear_bad(src    1211                 if (p4d_none_or_clear_bad(src_p4d))
1315                         continue;                1212                         continue;
1316                 if (copy_pud_range(dst_vma, s !! 1213                 if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
1317                                    addr, next !! 1214                                                 vma, addr, next))
1318                         return -ENOMEM;          1215                         return -ENOMEM;
1319         } while (dst_p4d++, src_p4d++, addr =    1216         } while (dst_p4d++, src_p4d++, addr = next, addr != end);
1320         return 0;                                1217         return 0;
1321 }                                                1218 }
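
copy_p4d_range(), copy_pud_range() and copy_pmd_range() all share one shape: allocate the matching level in the destination, skip empty source entries, and descend one level until copy_pte_range() does the real work. A two-level toy table copy showing that shape (illustrative sketch; the struct and function names are made up and nothing here is a kernel interface):

	/* table_walk_toy.c - sketch of a two-level table copy. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define TOP_ENTRIES	8
	#define LEAF_ENTRIES	16

	struct leaf_table { unsigned long entry[LEAF_ENTRIES]; };
	struct top_table  { struct leaf_table *leaf[TOP_ENTRIES]; };

	/* Like copy_pte_range(): copy one leaf table's entries. */
	static int copy_leaf_range(struct top_table *dst,
				   const struct top_table *src, int i)
	{
		if (!dst->leaf[i]) {
			dst->leaf[i] = calloc(1, sizeof(*dst->leaf[i]));
			if (!dst->leaf[i])
				return -1;		/* -ENOMEM */
		}
		memcpy(dst->leaf[i]->entry, src->leaf[i]->entry,
		       sizeof(dst->leaf[i]->entry));
		return 0;
	}

	/* Like copy_p4d/pud/pmd_range(): skip empty slots, descend into
	 * present ones, and propagate allocation failures upward. */
	static int copy_top_range(struct top_table *dst,
				  const struct top_table *src)
	{
		for (int i = 0; i < TOP_ENTRIES; i++) {
			if (!src->leaf[i])
				continue;	/* pXd_none_or_clear_bad() */
			if (copy_leaf_range(dst, src, i))
				return -1;
		}
		return 0;
	}

	int main(void)
	{
		struct top_table src = { 0 }, dst = { 0 };

		src.leaf[2] = calloc(1, sizeof(*src.leaf[2]));
		if (!src.leaf[2])
			return 1;
		src.leaf[2]->entry[5] = 0x1234;
		if (copy_top_range(&dst, &src))
			return 1;
		printf("copied entry: %#lx\n", dst.leaf[2]->entry[5]);
		return 0;
	}
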
1322                                                  1219 
1323 /*                                            !! 1220 int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1324  * Return true if the vma needs to copy the p !! 1221                 struct vm_area_struct *vma)
1325  * false when we can speed up fork() by allow << 
1326  * when the child accesses the memory range.  << 
1327  */                                           << 
1328 static bool                                   << 
1329 vma_needs_copy(struct vm_area_struct *dst_vma << 
1330 {                                             << 
1331         /*                                    << 
1332          * Always copy pgtables when dst_vma  << 
1333          * file-backed (e.g. shmem). Because  << 
1334          * contains uffd-wp protection inform << 
1335          * retrieve from page cache, and skip << 
1336          */                                   << 
1337         if (userfaultfd_wp(dst_vma))          << 
1338                 return true;                  << 
1339                                               << 
1340         if (src_vma->vm_flags & (VM_PFNMAP |  << 
1341                 return true;                  << 
1342                                               << 
1343         if (src_vma->anon_vma)                << 
1344                 return true;                  << 
1345                                               << 
1346         /*                                    << 
1347          * Don't copy ptes where a page fault << 
1348          * becomes much lighter when there ar << 
1349          * mappings. The tradeoff is that cop << 
1350          * than faulting.                     << 
1351          */                                   << 
1352         return false;                         << 
1353 }                                             << 
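
The effect of vma_needs_copy() is visible from user space as fork() latency: page tables are only duplicated for VMAs that need it, and a large private anonymous mapping that has been written (so it has an anon_vma) is the classic case that does. A rough timing sketch (illustrative only; absolute numbers depend on the machine, and the 512 MiB size is arbitrary):

	/* fork_cost_demo.c - sketch: fork() cost with and without anon page tables. */
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <time.h>
	#include <unistd.h>

	static double time_fork_ms(void)
	{
		struct timespec a, b;

		clock_gettime(CLOCK_MONOTONIC, &a);
		pid_t pid = fork();
		if (pid == 0)
			_exit(0);
		clock_gettime(CLOCK_MONOTONIC, &b);
		waitpid(pid, NULL, 0);
		return (b.tv_sec - a.tv_sec) * 1e3 + (b.tv_nsec - a.tv_nsec) / 1e6;
	}

	int main(void)
	{
		size_t len = 512UL << 20;	/* 512 MiB */

		printf("baseline fork    : %.2f ms\n", time_fork_ms());

		/* Touching a large private anon mapping gives it an anon_vma,
		 * so its page tables must be copied by the next fork(). */
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return 1;
		memset(p, 1, len);
		printf("fork after memset: %.2f ms\n", time_fork_ms());
		return 0;
	}
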
1354                                               << 
1355 int                                           << 
1356 copy_page_range(struct vm_area_struct *dst_vm << 
1357 {                                                1222 {
1358         pgd_t *src_pgd, *dst_pgd;                1223         pgd_t *src_pgd, *dst_pgd;
1359         unsigned long next;                      1224         unsigned long next;
1360         unsigned long addr = src_vma->vm_star !! 1225         unsigned long addr = vma->vm_start;
1361         unsigned long end = src_vma->vm_end;  !! 1226         unsigned long end = vma->vm_end;
1362         struct mm_struct *dst_mm = dst_vma->v !! 1227         unsigned long mmun_start;       /* For mmu_notifiers */
1363         struct mm_struct *src_mm = src_vma->v !! 1228         unsigned long mmun_end;         /* For mmu_notifiers */
1364         struct mmu_notifier_range range;      << 
1365         bool is_cow;                             1229         bool is_cow;
1366         int ret;                                 1230         int ret;
1367                                                  1231 
1368         if (!vma_needs_copy(dst_vma, src_vma) !! 1232         /*
                                                   >> 1233          * Don't copy ptes where a page fault will fill them correctly.
                                                   >> 1234          * Fork becomes much lighter when there are big shared or private
                                                   >> 1235          * readonly mappings. The tradeoff is that copy_page_range is more
                                                   >> 1236          * efficient than faulting.
                                                   >> 1237          */
                                                   >> 1238         if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
                                                   >> 1239                         !vma->anon_vma)
1369                 return 0;                        1240                 return 0;
1370                                                  1241 
1371         if (is_vm_hugetlb_page(src_vma))      !! 1242         if (is_vm_hugetlb_page(vma))
1372                 return copy_hugetlb_page_rang !! 1243                 return copy_hugetlb_page_range(dst_mm, src_mm, vma);
1373                                                  1244 
1374         if (unlikely(src_vma->vm_flags & VM_P !! 1245         if (unlikely(vma->vm_flags & VM_PFNMAP)) {
1375                 /*                               1246                 /*
1376                  * We do not free on error ca    1247                  * We do not free on error cases below as remove_vma
1377                  * gets called on error from     1248                  * gets called on error from higher level routine
1378                  */                              1249                  */
1379                 ret = track_pfn_copy(src_vma) !! 1250                 ret = track_pfn_copy(vma);
1380                 if (ret)                         1251                 if (ret)
1381                         return ret;              1252                         return ret;
1382         }                                        1253         }
1383                                                  1254 
1384         /*                                       1255         /*
1385          * We need to invalidate the secondar    1256          * We need to invalidate the secondary MMU mappings only when
1386          * there could be a permission downgr    1257          * there could be a permission downgrade on the ptes of the
1387          * parent mm. And a permission downgr    1258          * parent mm. And a permission downgrade will only happen if
1388          * is_cow_mapping() returns true.        1259          * is_cow_mapping() returns true.
1389          */                                      1260          */
1390         is_cow = is_cow_mapping(src_vma->vm_f !! 1261         is_cow = is_cow_mapping(vma->vm_flags);
1391                                               !! 1262         mmun_start = addr;
1392         if (is_cow) {                         !! 1263         mmun_end   = end;
1393                 mmu_notifier_range_init(&rang !! 1264         if (is_cow)
1394                                         0, sr !! 1265                 mmu_notifier_invalidate_range_start(src_mm, mmun_start,
1395                 mmu_notifier_invalidate_range !! 1266                                                     mmun_end);
1396                 /*                            << 
1397                  * Disabling preemption is no << 
1398                  * the read side doesn't spin << 
1399                  *                            << 
1400                  * Use the raw variant of the << 
1401                  * lockdep complaining about  << 
1402                  */                           << 
1403                 vma_assert_write_locked(src_v << 
1404                 raw_write_seqcount_begin(&src << 
1405         }                                     << 
1406                                                  1267 
1407         ret = 0;                                 1268         ret = 0;
1408         dst_pgd = pgd_offset(dst_mm, addr);      1269         dst_pgd = pgd_offset(dst_mm, addr);
1409         src_pgd = pgd_offset(src_mm, addr);      1270         src_pgd = pgd_offset(src_mm, addr);
1410         do {                                     1271         do {
1411                 next = pgd_addr_end(addr, end    1272                 next = pgd_addr_end(addr, end);
1412                 if (pgd_none_or_clear_bad(src    1273                 if (pgd_none_or_clear_bad(src_pgd))
1413                         continue;                1274                         continue;
1414                 if (unlikely(copy_p4d_range(d !! 1275                 if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
1415                                             a !! 1276                                             vma, addr, next))) {
1416                         untrack_pfn_clear(dst << 
1417                         ret = -ENOMEM;           1277                         ret = -ENOMEM;
1418                         break;                   1278                         break;
1419                 }                                1279                 }
1420         } while (dst_pgd++, src_pgd++, addr =    1280         } while (dst_pgd++, src_pgd++, addr = next, addr != end);
1421                                                  1281 
1422         if (is_cow) {                         !! 1282         if (is_cow)
1423                 raw_write_seqcount_end(&src_m !! 1283                 mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end);
1424                 mmu_notifier_invalidate_range << 
1425         }                                     << 
1426         return ret;                              1284         return ret;
1427 }                                                1285 }
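
For contrast with the copy-on-write case above, a shared mapping is not write-protected at fork time, so a post-fork write by the parent is immediately visible to the child. A minimal user-space sketch (illustrative only; the sleep is just to order the parent's write before the child's read):

	/* shared_fork_demo.c - sketch: MAP_SHARED memory stays shared across fork(). */
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return 1;
		strcpy(p, "before-fork");

		pid_t pid = fork();
		if (pid == 0) {
			sleep(1);			/* let the parent write first */
			printf("child sees: %s\n", p);	/* prints "after-fork" */
			_exit(0);
		}
		strcpy(p, "after-fork");	/* no COW: same page as the child */
		waitpid(pid, NULL, 0);
		return 0;
	}
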
1428                                                  1286 
1429 /* Whether we should zap all COWed (private)  << 
1430 static inline bool should_zap_cows(struct zap << 
1431 {                                             << 
1432         /* By default, zap all pages */       << 
1433         if (!details)                         << 
1434                 return true;                  << 
1435                                               << 
1436         /* Or, we zap COWed pages only if the << 
1437         return details->even_cows;            << 
1438 }                                             << 
1439                                               << 
1440 /* Decides whether we should zap this folio w << 
1441 static inline bool should_zap_folio(struct za << 
1442                                     struct fo << 
1443 {                                             << 
1444         /* If we can make a decision without  << 
1445         if (should_zap_cows(details))         << 
1446                 return true;                  << 
1447                                               << 
1448         /* Otherwise we should only zap non-a << 
1449         return !folio_test_anon(folio);       << 
1450 }                                             << 
1451                                               << 
1452 static inline bool zap_drop_file_uffd_wp(stru << 
1453 {                                             << 
1454         if (!details)                         << 
1455                 return false;                 << 
1456                                               << 
1457         return details->zap_flags & ZAP_FLAG_ << 
1458 }                                             << 
1459                                               << 
1460 /*                                            << 
1461  * This function makes sure that we'll replac << 
1462  * swap special pte marker when necessary. Mu << 
1463  */                                           << 
1464 static inline void                            << 
1465 zap_install_uffd_wp_if_needed(struct vm_area_ << 
1466                               unsigned long a << 
1467                               struct zap_deta << 
1468 {                                             << 
1469         /* Zap on anonymous always means drop << 
1470         if (vma_is_anonymous(vma))            << 
1471                 return;                       << 
1472                                               << 
1473         if (zap_drop_file_uffd_wp(details))   << 
1474                 return;                       << 
1475                                               << 
1476         for (;;) {                            << 
1477                 /* the PFN in the PTE is irre << 
1478                 pte_install_uffd_wp_if_needed << 
1479                 if (--nr == 0)                << 
1480                         break;                << 
1481                 pte++;                        << 
1482                 addr += PAGE_SIZE;            << 
1483         }                                     << 
1484 }                                             << 
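
The uffd-wp marker handling above serves the userfaultfd write-protect API. A compact user-space sketch of that API round trip (illustrative only; it needs a kernel built with userfaultfd write-protect support and permission to call userfaultfd(), and it removes the protection again before writing so the write cannot block waiting for a fault handler this sketch does not start):

	/* uffd_wp_demo.c - sketch: write-protect and un-protect one page via userfaultfd. */
	#include <fcntl.h>
	#include <linux/userfaultfd.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return 1;
		p[0] = 1;			/* make the page present */

		int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
		if (uffd < 0)
			return 1;

		struct uffdio_api api = { .api = UFFD_API,
					  .features = UFFD_FEATURE_PAGEFAULT_FLAG_WP };
		if (ioctl(uffd, UFFDIO_API, &api))
			return 1;

		struct uffdio_register reg = {
			.range = { .start = (unsigned long)p, .len = page },
			.mode  = UFFDIO_REGISTER_MODE_WP,
		};
		if (ioctl(uffd, UFFDIO_REGISTER, &reg))
			return 1;

		/* Write-protect the page ... */
		struct uffdio_writeprotect wp = {
			.range = { .start = (unsigned long)p, .len = page },
			.mode  = UFFDIO_WRITEPROTECT_MODE_WP,
		};
		if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp))
			return 1;

		/* ... then drop the protection so the write below proceeds. */
		wp.mode = 0;
		if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp))
			return 1;

		p[0] = 2;
		printf("wrote %d after un-protecting\n", p[0]);
		return 0;
	}
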
1485                                               << 
1486 static __always_inline void zap_present_folio << 
1487                 struct vm_area_struct *vma, s << 
1488                 struct page *page, pte_t *pte << 
1489                 unsigned long addr, struct za << 
1490                 bool *force_flush, bool *forc << 
1491 {                                             << 
1492         struct mm_struct *mm = tlb->mm;       << 
1493         bool delay_rmap = false;              << 
1494                                               << 
1495         if (!folio_test_anon(folio)) {        << 
1496                 ptent = get_and_clear_full_pt << 
1497                 if (pte_dirty(ptent)) {       << 
1498                         folio_mark_dirty(foli << 
1499                         if (tlb_delay_rmap(tl << 
1500                                 delay_rmap =  << 
1501                                 *force_flush  << 
1502                         }                     << 
1503                 }                             << 
1504                 if (pte_young(ptent) && likel << 
1505                         folio_mark_accessed(f << 
1506                 rss[mm_counter(folio)] -= nr; << 
1507         } else {                              << 
1508                 /* We don't need up-to-date a << 
1509                 clear_full_ptes(mm, addr, pte << 
1510                 rss[MM_ANONPAGES] -= nr;      << 
1511         }                                     << 
1512         /* Checking a single PTE in a batch i << 
1513         arch_check_zapped_pte(vma, ptent);    << 
1514         tlb_remove_tlb_entries(tlb, pte, nr,  << 
1515         if (unlikely(userfaultfd_pte_wp(vma,  << 
1516                 zap_install_uffd_wp_if_needed << 
1517                                               << 
1518                                               << 
1519         if (!delay_rmap) {                    << 
1520                 folio_remove_rmap_ptes(folio, << 
1521                                               << 
1522                 if (unlikely(folio_mapcount(f << 
1523                         print_bad_pte(vma, ad << 
1524         }                                     << 
1525         if (unlikely(__tlb_remove_folio_pages << 
1526                 *force_flush = true;          << 
1527                 *force_break = true;          << 
1528         }                                     << 
1529 }                                             << 
1530                                               << 
1531 /*                                            << 
1532  * Zap or skip at least one present PTE, tryi << 
1533  * PTEs that map consecutive pages of the sam << 
1534  *                                            << 
1535  * Returns the number of processed (skipped o << 
1536  */                                           << 
1537 static inline int zap_present_ptes(struct mmu << 
1538                 struct vm_area_struct *vma, p << 
1539                 unsigned int max_nr, unsigned << 
1540                 struct zap_details *details,  << 
1541                 bool *force_break)            << 
1542 {                                             << 
1543         const fpb_t fpb_flags = FPB_IGNORE_DI << 
1544         struct mm_struct *mm = tlb->mm;       << 
1545         struct folio *folio;                  << 
1546         struct page *page;                    << 
1547         int nr;                               << 
1548                                               << 
1549         page = vm_normal_page(vma, addr, pten << 
1550         if (!page) {                          << 
1551                 /* We don't need up-to-date a << 
1552                 ptep_get_and_clear_full(mm, a << 
1553                 arch_check_zapped_pte(vma, pt << 
1554                 tlb_remove_tlb_entry(tlb, pte << 
1555                 if (userfaultfd_pte_wp(vma, p << 
1556                         zap_install_uffd_wp_i << 
1557                                               << 
1558                 ksm_might_unmap_zero_page(mm, << 
1559                 return 1;                     << 
1560         }                                     << 
1561                                               << 
1562         folio = page_folio(page);             << 
1563         if (unlikely(!should_zap_folio(detail << 
1564                 return 1;                     << 
1565                                               << 
1566         /*                                    << 
1567          * Make sure that the common "small f << 
1568          * by keeping the batching logic sepa << 
1569          */                                   << 
1570         if (unlikely(folio_test_large(folio)  << 
1571                 nr = folio_pte_batch(folio, a << 
1572                                      NULL, NU << 
1573                                               << 
1574                 zap_present_folio_ptes(tlb, v << 
1575                                        addr,  << 
1576                                        force_ << 
1577                 return nr;                    << 
1578         }                                     << 
1579         zap_present_folio_ptes(tlb, vma, foli << 
1580                                details, rss,  << 
1581         return 1;                             << 
1582 }                                             << 
1583                                               << 
1584 static unsigned long zap_pte_range(struct mmu    1287 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1585                                 struct vm_are    1288                                 struct vm_area_struct *vma, pmd_t *pmd,
1586                                 unsigned long    1289                                 unsigned long addr, unsigned long end,
1587                                 struct zap_de    1290                                 struct zap_details *details)
1588 {                                                1291 {
1589         bool force_flush = false, force_break << 
1590         struct mm_struct *mm = tlb->mm;          1292         struct mm_struct *mm = tlb->mm;
                                                   >> 1293         int force_flush = 0;
1591         int rss[NR_MM_COUNTERS];                 1294         int rss[NR_MM_COUNTERS];
1592         spinlock_t *ptl;                         1295         spinlock_t *ptl;
1593         pte_t *start_pte;                        1296         pte_t *start_pte;
1594         pte_t *pte;                              1297         pte_t *pte;
1595         swp_entry_t entry;                       1298         swp_entry_t entry;
1596         int nr;                               << 
1597                                                  1299 
1598         tlb_change_page_size(tlb, PAGE_SIZE); !! 1300         tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
                                                   >> 1301 again:
1599         init_rss_vec(rss);                       1302         init_rss_vec(rss);
1600         start_pte = pte = pte_offset_map_lock !! 1303         start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1601         if (!pte)                             !! 1304         pte = start_pte;
1602                 return addr;                  << 
1603                                               << 
1604         flush_tlb_batched_pending(mm);           1305         flush_tlb_batched_pending(mm);
1605         arch_enter_lazy_mmu_mode();              1306         arch_enter_lazy_mmu_mode();
1606         do {                                     1307         do {
1607                 pte_t ptent = ptep_get(pte);  !! 1308                 pte_t ptent = *pte;
1608                 struct folio *folio;          << 
1609                 struct page *page;            << 
1610                 int max_nr;                   << 
1611                                               << 
1612                 nr = 1;                       << 
1613                 if (pte_none(ptent))             1309                 if (pte_none(ptent))
1614                         continue;                1310                         continue;
1615                                                  1311 
1616                 if (need_resched())           << 
1617                         break;                << 
1618                                               << 
1619                 if (pte_present(ptent)) {        1312                 if (pte_present(ptent)) {
1620                         max_nr = (end - addr) !! 1313                         struct page *page;
1621                         nr = zap_present_ptes !! 1314 
1622                                               !! 1315                         page = _vm_normal_page(vma, addr, ptent, true);
1623                                               !! 1316                         if (unlikely(details) && page) {
1624                         if (unlikely(force_br !! 1317                                 /*
1625                                 addr += nr *  !! 1318                                  * unmap_shared_mapping_pages() wants to
                                                   >> 1319                                  * invalidate cache without truncating:
                                                   >> 1320                                  * unmap shared but keep private pages.
                                                   >> 1321                                  */
                                                   >> 1322                                 if (details->check_mapping &&
                                                   >> 1323                                     details->check_mapping != page_rmapping(page))
                                                   >> 1324                                         continue;
                                                   >> 1325                         }
                                                   >> 1326                         ptent = ptep_get_and_clear_full(mm, addr, pte,
                                                   >> 1327                                                         tlb->fullmm);
                                                   >> 1328                         tlb_remove_tlb_entry(tlb, pte, addr);
                                                   >> 1329                         if (unlikely(!page))
                                                   >> 1330                                 continue;
                                                   >> 1331 
                                                   >> 1332                         if (!PageAnon(page)) {
                                                   >> 1333                                 if (pte_dirty(ptent)) {
                                                   >> 1334                                         force_flush = 1;
                                                   >> 1335                                         set_page_dirty(page);
                                                   >> 1336                                 }
                                                   >> 1337                                 if (pte_young(ptent) &&
                                                   >> 1338                                     likely(!(vma->vm_flags & VM_SEQ_READ)))
                                                   >> 1339                                         mark_page_accessed(page);
                                                   >> 1340                         }
                                                   >> 1341                         rss[mm_counter(page)]--;
                                                   >> 1342                         page_remove_rmap(page, false);
                                                   >> 1343                         if (unlikely(page_mapcount(page) < 0))
                                                   >> 1344                                 print_bad_pte(vma, addr, ptent, page);
                                                   >> 1345                         if (unlikely(__tlb_remove_page(tlb, page))) {
                                                   >> 1346                                 force_flush = 1;
                                                   >> 1347                                 addr += PAGE_SIZE;
1626                                 break;           1348                                 break;
1627                         }                        1349                         }
1628                         continue;                1350                         continue;
1629                 }                                1351                 }
1630                                                  1352 
1631                 entry = pte_to_swp_entry(pten    1353                 entry = pte_to_swp_entry(ptent);
1632                 if (is_device_private_entry(e !! 1354                 if (non_swap_entry(entry) && is_device_private_entry(entry)) {
1633                     is_device_exclusive_entry !! 1355                         struct page *page = device_private_entry_to_page(entry);
1634                         page = pfn_swap_entry !! 1356 
1635                         folio = page_folio(pa !! 1357                         if (unlikely(details && details->check_mapping)) {
1636                         if (unlikely(!should_ !! 1358                                 /*
1637                                 continue;     !! 1359                                  * unmap_shared_mapping_pages() wants to
1638                         /*                    !! 1360                                  * invalidate cache without truncating:
1639                          * Both device privat !! 1361                                  * unmap shared but keep private pages.
1640                          * work with anonymou !! 1362                                  */
1641                          * consider uffd-wp b !! 1363                                 if (details->check_mapping !=
1642                          * see zap_install_uf !! 1364                                     page_rmapping(page))
1643                          */                   !! 1365                                         continue;
1644                         WARN_ON_ONCE(!vma_is_ !! 1366                         }
1645                         rss[mm_counter(folio) !! 1367 
1646                         if (is_device_private !! 1368                         pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1647                                 folio_remove_ !! 1369                         rss[mm_counter(page)]--;
1648                         folio_put(folio);     !! 1370                         page_remove_rmap(page, false);
1649                 } else if (!non_swap_entry(en !! 1371                         put_page(page);
1650                         max_nr = (end - addr) !! 1372                         continue;
1651                         nr = swap_pte_batch(p !! 1373                 }
1652                         /* Genuine swap entri !! 1374 
1653                         if (!should_zap_cows( !! 1375                 /* If details->check_mapping, we leave swap entries. */
1654                                 continue;     !! 1376                 if (unlikely(details))
1655                         rss[MM_SWAPENTS] -= n !! 1377                         continue;
1656                         free_swap_and_cache_n !! 1378 
1657                 } else if (is_migration_entry !! 1379                 entry = pte_to_swp_entry(ptent);
1658                         folio = pfn_swap_entr !! 1380                 if (!non_swap_entry(entry))
1659                         if (!should_zap_folio !! 1381                         rss[MM_SWAPENTS]--;
1660                                 continue;     !! 1382                 else if (is_migration_entry(entry)) {
1661                         rss[mm_counter(folio) !! 1383                         struct page *page;
1662                 } else if (pte_marker_entry_u !! 1384 
1663                         /*                    !! 1385                         page = migration_entry_to_page(entry);
1664                          * For anon: always d !! 1386                         rss[mm_counter(page)]--;
1665                          * drop the marker if !! 1387                 }
1666                          */                   !! 1388                 if (unlikely(!free_swap_and_cache(entry)))
1667                         if (!vma_is_anonymous !! 1389                         print_bad_pte(vma, addr, ptent, NULL);
1668                             !zap_drop_file_uf !! 1390                 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1669                                 continue;     !! 1391         } while (pte++, addr += PAGE_SIZE, addr != end);
1670                 } else if (is_hwpoison_entry( << 
1671                            is_poisoned_swp_en << 
1672                         if (!should_zap_cows( << 
1673                                 continue;     << 
1674                 } else {                      << 
1675                         /* We should have cov << 
1676                         pr_alert("unrecognize << 
1677                         WARN_ON_ONCE(1);      << 
1678                 }                             << 
1679                 clear_not_present_full_ptes(m << 
1680                 zap_install_uffd_wp_if_needed << 
1681         } while (pte += nr, addr += PAGE_SIZE << 
1682                                                  1392 
1683         add_mm_rss_vec(mm, rss);                 1393         add_mm_rss_vec(mm, rss);
1684         arch_leave_lazy_mmu_mode();              1394         arch_leave_lazy_mmu_mode();
1685                                                  1395 
1686         /* Do the actual TLB flush before dro    1396         /* Do the actual TLB flush before dropping ptl */
1687         if (force_flush) {                    !! 1397         if (force_flush)
1688                 tlb_flush_mmu_tlbonly(tlb);      1398                 tlb_flush_mmu_tlbonly(tlb);
1689                 tlb_flush_rmaps(tlb, vma);    << 
1690         }                                     << 
1691         pte_unmap_unlock(start_pte, ptl);        1399         pte_unmap_unlock(start_pte, ptl);
1692                                                  1400 
1693         /*                                       1401         /*
1694          * If we forced a TLB flush (either d    1402          * If we forced a TLB flush (either due to running out of
1695          * batch buffers or because we needed    1403          * batch buffers or because we needed to flush dirty TLB
1696          * entries before releasing the ptl),    1404          * entries before releasing the ptl), free the batched
1697          * memory too. Come back again if we  !! 1405          * memory too. Restart if we didn't do everything.
1698          */                                      1406          */
1699         if (force_flush)                      !! 1407         if (force_flush) {
1700                 tlb_flush_mmu(tlb);           !! 1408                 force_flush = 0;
                                                   >> 1409                 tlb_flush_mmu_free(tlb);
                                                   >> 1410                 if (addr != end)
                                                   >> 1411                         goto again;
                                                   >> 1412         }
1701                                                  1413 
1702         return addr;                             1414         return addr;
1703 }                                                1415 }
1704                                                  1416 
1705 static inline unsigned long zap_pmd_range(str    1417 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1706                                 struct vm_are    1418                                 struct vm_area_struct *vma, pud_t *pud,
1707                                 unsigned long    1419                                 unsigned long addr, unsigned long end,
1708                                 struct zap_de    1420                                 struct zap_details *details)
1709 {                                                1421 {
1710         pmd_t *pmd;                              1422         pmd_t *pmd;
1711         unsigned long next;                      1423         unsigned long next;
1712                                                  1424 
1713         pmd = pmd_offset(pud, addr);             1425         pmd = pmd_offset(pud, addr);
1714         do {                                     1426         do {
1715                 next = pmd_addr_end(addr, end    1427                 next = pmd_addr_end(addr, end);
1716                 if (is_swap_pmd(*pmd) || pmd_    1428                 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1717                         if (next - addr != HP    1429                         if (next - addr != HPAGE_PMD_SIZE)
1718                                 __split_huge_    1430                                 __split_huge_pmd(vma, pmd, addr, false, NULL);
1719                         else if (zap_huge_pmd !! 1431                         else if (zap_huge_pmd(tlb, vma, pmd, addr))
1720                                 addr = next;  !! 1432                                 goto next;
1721                                 continue;     << 
1722                         }                     << 
1723                         /* fall through */       1433                         /* fall through */
1724                 } else if (details && details << 
1725                            folio_test_pmd_map << 
1726                            next - addr == HPA << 
1727                         spinlock_t *ptl = pmd << 
1728                         /*                    << 
1729                          * Take and drop THP  << 
1730                          * prematurely, while << 
1731                          * but not yet decrem << 
1732                          */                   << 
1733                         spin_unlock(ptl);     << 
1734                 }                                1434                 }
1735                 if (pmd_none(*pmd)) {         !! 1435                 /*
1736                         addr = next;          !! 1436                  * Here there can be other concurrent MADV_DONTNEED or
1737                         continue;             !! 1437                  * trans huge page faults running, and if the pmd is
1738                 }                             !! 1438                  * none or trans huge it can change under us. This is
1739                 addr = zap_pte_range(tlb, vma !! 1439                  * because MADV_DONTNEED holds the mmap_sem in read
1740                 if (addr != next)             !! 1440                  * mode.
1741                         pmd--;                !! 1441                  */
1742         } while (pmd++, cond_resched(), addr  !! 1442                 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                                                   >> 1443                         goto next;
                                                   >> 1444                 next = zap_pte_range(tlb, vma, pmd, addr, next, details);
                                                   >> 1445 next:
                                                   >> 1446                 cond_resched();
                                                   >> 1447         } while (pmd++, addr = next, addr != end);
1743                                                  1448 
1744         return addr;                             1449         return addr;
1745 }                                                1450 }
1746                                                  1451 
1747 static inline unsigned long zap_pud_range(str    1452 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1748                                 struct vm_are    1453                                 struct vm_area_struct *vma, p4d_t *p4d,
1749                                 unsigned long    1454                                 unsigned long addr, unsigned long end,
1750                                 struct zap_de    1455                                 struct zap_details *details)
1751 {                                                1456 {
1752         pud_t *pud;                              1457         pud_t *pud;
1753         unsigned long next;                      1458         unsigned long next;
1754                                                  1459 
1755         pud = pud_offset(p4d, addr);             1460         pud = pud_offset(p4d, addr);
1756         do {                                     1461         do {
1757                 next = pud_addr_end(addr, end    1462                 next = pud_addr_end(addr, end);
1758                 if (pud_trans_huge(*pud) || p    1463                 if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1759                         if (next - addr != HP    1464                         if (next - addr != HPAGE_PUD_SIZE) {
1760                                 mmap_assert_l !! 1465                                 VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
1761                                 split_huge_pu    1466                                 split_huge_pud(vma, pud, addr);
1762                         } else if (zap_huge_p    1467                         } else if (zap_huge_pud(tlb, vma, pud, addr))
1763                                 goto next;       1468                                 goto next;
1764                         /* fall through */       1469                         /* fall through */
1765                 }                                1470                 }
1766                 if (pud_none_or_clear_bad(pud    1471                 if (pud_none_or_clear_bad(pud))
1767                         continue;                1472                         continue;
1768                 next = zap_pmd_range(tlb, vma    1473                 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1769 next:                                            1474 next:
1770                 cond_resched();                  1475                 cond_resched();
1771         } while (pud++, addr = next, addr !=     1476         } while (pud++, addr = next, addr != end);
1772                                                  1477 
1773         return addr;                             1478         return addr;
1774 }                                                1479 }
1775                                                  1480 
1776 static inline unsigned long zap_p4d_range(str    1481 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1777                                 struct vm_are    1482                                 struct vm_area_struct *vma, pgd_t *pgd,
1778                                 unsigned long    1483                                 unsigned long addr, unsigned long end,
1779                                 struct zap_de    1484                                 struct zap_details *details)
1780 {                                                1485 {
1781         p4d_t *p4d;                              1486         p4d_t *p4d;
1782         unsigned long next;                      1487         unsigned long next;
1783                                                  1488 
1784         p4d = p4d_offset(pgd, addr);             1489         p4d = p4d_offset(pgd, addr);
1785         do {                                     1490         do {
1786                 next = p4d_addr_end(addr, end    1491                 next = p4d_addr_end(addr, end);
1787                 if (p4d_none_or_clear_bad(p4d    1492                 if (p4d_none_or_clear_bad(p4d))
1788                         continue;                1493                         continue;
1789                 next = zap_pud_range(tlb, vma    1494                 next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1790         } while (p4d++, addr = next, addr !=     1495         } while (p4d++, addr = next, addr != end);
1791                                                  1496 
1792         return addr;                             1497         return addr;
1793 }                                                1498 }
1794                                                  1499 
1795 void unmap_page_range(struct mmu_gather *tlb,    1500 void unmap_page_range(struct mmu_gather *tlb,
1796                              struct vm_area_s    1501                              struct vm_area_struct *vma,
1797                              unsigned long ad    1502                              unsigned long addr, unsigned long end,
1798                              struct zap_detai    1503                              struct zap_details *details)
1799 {                                                1504 {
1800         pgd_t *pgd;                              1505         pgd_t *pgd;
1801         unsigned long next;                      1506         unsigned long next;
1802                                                  1507 
1803         BUG_ON(addr >= end);                     1508         BUG_ON(addr >= end);
1804         tlb_start_vma(tlb, vma);                 1509         tlb_start_vma(tlb, vma);
1805         pgd = pgd_offset(vma->vm_mm, addr);      1510         pgd = pgd_offset(vma->vm_mm, addr);
1806         do {                                     1511         do {
1807                 next = pgd_addr_end(addr, end    1512                 next = pgd_addr_end(addr, end);
1808                 if (pgd_none_or_clear_bad(pgd    1513                 if (pgd_none_or_clear_bad(pgd))
1809                         continue;                1514                         continue;
1810                 next = zap_p4d_range(tlb, vma    1515                 next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1811         } while (pgd++, addr = next, addr !=     1516         } while (pgd++, addr = next, addr != end);
1812         tlb_end_vma(tlb, vma);                   1517         tlb_end_vma(tlb, vma);
1813 }                                                1518 }
1814                                                  1519 
1815                                                  1520 
1816 static void unmap_single_vma(struct mmu_gathe    1521 static void unmap_single_vma(struct mmu_gather *tlb,
1817                 struct vm_area_struct *vma, u    1522                 struct vm_area_struct *vma, unsigned long start_addr,
1818                 unsigned long end_addr,          1523                 unsigned long end_addr,
1819                 struct zap_details *details,  !! 1524                 struct zap_details *details)
1820 {                                                1525 {
1821         unsigned long start = max(vma->vm_sta    1526         unsigned long start = max(vma->vm_start, start_addr);
1822         unsigned long end;                       1527         unsigned long end;
1823                                                  1528 
1824         if (start >= vma->vm_end)                1529         if (start >= vma->vm_end)
1825                 return;                          1530                 return;
1826         end = min(vma->vm_end, end_addr);        1531         end = min(vma->vm_end, end_addr);
1827         if (end <= vma->vm_start)                1532         if (end <= vma->vm_start)
1828                 return;                          1533                 return;
1829                                                  1534 
1830         if (vma->vm_file)                        1535         if (vma->vm_file)
1831                 uprobe_munmap(vma, start, end    1536                 uprobe_munmap(vma, start, end);
1832                                                  1537 
1833         if (unlikely(vma->vm_flags & VM_PFNMA    1538         if (unlikely(vma->vm_flags & VM_PFNMAP))
1834                 untrack_pfn(vma, 0, 0, mm_wr_ !! 1539                 untrack_pfn(vma, 0, 0);
1835                                                  1540 
1836         if (start != end) {                      1541         if (start != end) {
1837                 if (unlikely(is_vm_hugetlb_pa    1542                 if (unlikely(is_vm_hugetlb_page(vma))) {
1838                         /*                       1543                         /*
1839                          * It is undesirable     1544                          * It is undesirable to test vma->vm_file as it
1840                          * should be non-null    1545                          * should be non-null for valid hugetlb area.
1841                          * However, vm_file w    1546                          * However, vm_file will be NULL in the error
1842                          * cleanup path of mm    1547                          * cleanup path of mmap_region. When
1843                          * hugetlbfs ->mmap m    1548                          * hugetlbfs ->mmap method fails,
1844                          * mmap_region() null    1549                          * mmap_region() nullifies vma->vm_file
1845                          * before calling thi    1550                          * before calling this function to clean up.
1846                          * Since no pte has a    1551                          * Since no pte has actually been setup, it is
1847                          * safe to do nothing    1552                          * safe to do nothing in this case.
1848                          */                      1553                          */
1849                         if (vma->vm_file) {      1554                         if (vma->vm_file) {
1850                                 zap_flags_t z !! 1555                                 i_mmap_lock_write(vma->vm_file->f_mapping);
1851                                     details-> !! 1556                                 __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
1852                                 __unmap_hugep !! 1557                                 i_mmap_unlock_write(vma->vm_file->f_mapping);
1853                                               << 
1854                         }                        1558                         }
1855                 } else                           1559                 } else
1856                         unmap_page_range(tlb,    1560                         unmap_page_range(tlb, vma, start, end, details);
1857         }                                        1561         }
1858 }                                                1562 }
1859                                                  1563 
1860 /**                                              1564 /**
1861  * unmap_vmas - unmap a range of memory cover    1565  * unmap_vmas - unmap a range of memory covered by a list of vma's
1862  * @tlb: address of the caller's struct mmu_g    1566  * @tlb: address of the caller's struct mmu_gather
1863  * @mas: the maple state                      << 
1864  * @vma: the starting vma                        1567  * @vma: the starting vma
1865  * @start_addr: virtual address at which to s    1568  * @start_addr: virtual address at which to start unmapping
1866  * @end_addr: virtual address at which to end    1569  * @end_addr: virtual address at which to end unmapping
1867  * @tree_end: The maximum index to check      << 
1868  * @mm_wr_locked: lock flag                   << 
1869  *                                               1570  *
1870  * Unmap all pages in the vma list.              1571  * Unmap all pages in the vma list.
1871  *                                               1572  *
1872  * Only addresses between `start' and `end' w    1573  * Only addresses between `start' and `end' will be unmapped.
1873  *                                               1574  *
1874  * The VMA list must be sorted in ascending v    1575  * The VMA list must be sorted in ascending virtual address order.
1875  *                                               1576  *
1876  * unmap_vmas() assumes that the caller will     1577  * unmap_vmas() assumes that the caller will flush the whole unmapped address
1877  * range after unmap_vmas() returns.  So the     1578  * range after unmap_vmas() returns.  So the only responsibility here is to
1878  * ensure that any thus-far unmapped pages ar    1579  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1879  * drops the lock and schedules.                 1580  * drops the lock and schedules.
1880  */                                              1581  */
1881 void unmap_vmas(struct mmu_gather *tlb, struc !! 1582 void unmap_vmas(struct mmu_gather *tlb,
1882                 struct vm_area_struct *vma, u    1583                 struct vm_area_struct *vma, unsigned long start_addr,
1883                 unsigned long end_addr, unsig !! 1584                 unsigned long end_addr)
1884                 bool mm_wr_locked)            << 
1885 {                                                1585 {
1886         struct mmu_notifier_range range;      !! 1586         struct mm_struct *mm = vma->vm_mm;
1887         struct zap_details details = {        << 
1888                 .zap_flags = ZAP_FLAG_DROP_MA << 
1889                 /* Careful - we need to zap p << 
1890                 .even_cows = true,            << 
1891         };                                    << 
1892                                                  1587 
1893         mmu_notifier_range_init(&range, MMU_N !! 1588         mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
1894                                 start_addr, e !! 1589         for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
1895         mmu_notifier_invalidate_range_start(& !! 1590                 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
1896         do {                                  !! 1591         mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
1897                 unsigned long start = start_a !! 1592 }
1898                 unsigned long end = end_addr; !! 1593 
1899                 hugetlb_zap_begin(vma, &start !! 1594 /**
1900                 unmap_single_vma(tlb, vma, st !! 1595  * zap_page_range - remove user pages in a given range
1901                                  mm_wr_locked !! 1596  * @vma: vm_area_struct holding the applicable pages
1902                 hugetlb_zap_end(vma, &details !! 1597  * @start: starting address of pages to zap
1903                 vma = mas_find(mas, tree_end  !! 1598  * @size: number of bytes to zap
1904         } while (vma && likely(!xa_is_zero(vm !! 1599  *
1905         mmu_notifier_invalidate_range_end(&ra !! 1600  * Caller must protect the VMA list
                                                   >> 1601  */
                                                   >> 1602 void zap_page_range(struct vm_area_struct *vma, unsigned long start,
                                                   >> 1603                 unsigned long size)
                                                   >> 1604 {
                                                   >> 1605         struct mm_struct *mm = vma->vm_mm;
                                                   >> 1606         struct mmu_gather tlb;
                                                   >> 1607         unsigned long end = start + size;
                                                   >> 1608 
                                                   >> 1609         lru_add_drain();
                                                   >> 1610         tlb_gather_mmu(&tlb, mm, start, end);
                                                   >> 1611         update_hiwater_rss(mm);
                                                   >> 1612         mmu_notifier_invalidate_range_start(mm, start, end);
                                                   >> 1613         for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
                                                   >> 1614                 unmap_single_vma(&tlb, vma, start, end, NULL);
                                                   >> 1615 
                                                   >> 1616                 /*
                                                   >> 1617                  * zap_page_range does not specify whether mmap_sem should be
                                                   >> 1618                  * held for read or write. That allows parallel zap_page_range
                                                   >> 1619                  * operations to unmap a PTE and defer a flush meaning that
                                                   >> 1620                  * this call observes pte_none and fails to flush the TLB.
                                                   >> 1621                  * Rather than adding a complex API, ensure that no stale
                                                   >> 1622                  * TLB entries exist when this call returns.
                                                   >> 1623                  */
                                                   >> 1624                 flush_tlb_range(vma, start, end);
                                                   >> 1625         }
                                                   >> 1626 
                                                   >> 1627         mmu_notifier_invalidate_range_end(mm, start, end);
                                                   >> 1628         tlb_finish_mmu(&tlb, start, end);
1906 }                                                1629 }
1907                                                  1630 
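As the kernel-doc above notes, unmap_vmas() only drops the mappings; the caller owns the mmu_gather and is responsible for flushing the whole range afterwards. A rough, hedged sketch of the caller-side pattern for the older interface shown in the right-hand column (essentially what exit_mmap()/unmap_region() do; mm, vma, start and end are assumed to come from the caller, and the newer interface additionally threads a maple state, tree_end and mm_wr_locked through):

        struct mmu_gather tlb;

        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, start, end);
        update_hiwater_rss(mm);
        unmap_vmas(&tlb, vma, start, end);              /* zap the user pages */
        free_pgtables(&tlb, vma, FIRST_USER_ADDRESS,    /* then the page tables */
                      USER_PGTABLES_CEILING);
        tlb_finish_mmu(&tlb, start, end);               /* final TLB flush and free */
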
1908 /**                                              1631 /**
1909  * zap_page_range_single - remove user pages     1632  * zap_page_range_single - remove user pages in a given range
1910  * @vma: vm_area_struct holding the applicabl    1633  * @vma: vm_area_struct holding the applicable pages
1911  * @address: starting address of pages to zap    1634  * @address: starting address of pages to zap
1912  * @size: number of bytes to zap                 1635  * @size: number of bytes to zap
1913  * @details: details of shared cache invalida    1636  * @details: details of shared cache invalidation
1914  *                                               1637  *
1915  * The range must fit into one VMA.              1638  * The range must fit into one VMA.
1916  */                                              1639  */
1917 void zap_page_range_single(struct vm_area_str !! 1640 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1918                 unsigned long size, struct za    1641                 unsigned long size, struct zap_details *details)
1919 {                                                1642 {
1920         const unsigned long end = address + s !! 1643         struct mm_struct *mm = vma->vm_mm;
1921         struct mmu_notifier_range range;      << 
1922         struct mmu_gather tlb;                   1644         struct mmu_gather tlb;
                                                   >> 1645         unsigned long end = address + size;
1923                                                  1646 
1924         lru_add_drain();                         1647         lru_add_drain();
1925         mmu_notifier_range_init(&range, MMU_N !! 1648         tlb_gather_mmu(&tlb, mm, address, end);
1926                                 address, end) !! 1649         update_hiwater_rss(mm);
1927         hugetlb_zap_begin(vma, &range.start,  !! 1650         mmu_notifier_invalidate_range_start(mm, address, end);
1928         tlb_gather_mmu(&tlb, vma->vm_mm);     !! 1651         unmap_single_vma(&tlb, vma, address, end, details);
1929         update_hiwater_rss(vma->vm_mm);       !! 1652         mmu_notifier_invalidate_range_end(mm, address, end);
1930         mmu_notifier_invalidate_range_start(& !! 1653         tlb_finish_mmu(&tlb, address, end);
1931         /*                                    << 
1932          * unmap 'address-end' not 'range.sta << 
1933          * could have been expanded for huget << 
1934          */                                   << 
1935         unmap_single_vma(&tlb, vma, address,  << 
1936         mmu_notifier_invalidate_range_end(&ra << 
1937         tlb_finish_mmu(&tlb);                 << 
1938         hugetlb_zap_end(vma, details);        << 
1939 }                                                1654 }
1940                                                  1655 
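zap_page_range_single() is the single-VMA variant: the caller must already have clamped the range to one vma. In current kernels this is, for example, what the MADV_DONTNEED path ends up doing per vma. A minimal hedged sketch (vma, start and end are assumed from the caller's context):

        /* Drop the user pages backing [start, end) of this one vma;
         * a NULL details pointer means no special file-zap semantics. */
        zap_page_range_single(vma, start, end - start, NULL);
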
1941 /**                                              1656 /**
1942  * zap_vma_ptes - remove ptes mapping the vma    1657  * zap_vma_ptes - remove ptes mapping the vma
1943  * @vma: vm_area_struct holding ptes to be za    1658  * @vma: vm_area_struct holding ptes to be zapped
1944  * @address: starting address of pages to zap    1659  * @address: starting address of pages to zap
1945  * @size: number of bytes to zap                 1660  * @size: number of bytes to zap
1946  *                                               1661  *
1947  * This function only unmaps ptes assigned to    1662  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1948  *                                               1663  *
1949  * The entire address range must be fully con    1664  * The entire address range must be fully contained within the vma.
1950  *                                               1665  *
1951  */                                              1666  */
1952 void zap_vma_ptes(struct vm_area_struct *vma,    1667 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1953                 unsigned long size)              1668                 unsigned long size)
1954 {                                                1669 {
1955         if (!range_in_vma(vma, address, addre !! 1670         if (address < vma->vm_start || address + size > vma->vm_end ||
1956                         !(vma->vm_flags & VM_    1671                         !(vma->vm_flags & VM_PFNMAP))
1957                 return;                          1672                 return;
1958                                                  1673 
1959         zap_page_range_single(vma, address, s    1674         zap_page_range_single(vma, address, size, NULL);
1960 }                                                1675 }
1961 EXPORT_SYMBOL_GPL(zap_vma_ptes);                 1676 EXPORT_SYMBOL_GPL(zap_vma_ptes);
1962                                                  1677 
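zap_vma_ptes() is the exported helper for drivers that established a VM_PFNMAP mapping (for instance with remap_pfn_range()) and later need to revoke it before the backing resource goes away. A minimal hedged sketch:

        /* Tear down every pte previously installed in this vma. The range
         * must lie entirely inside the vma and the vma must be VM_PFNMAP,
         * otherwise the call quietly does nothing. */
        zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
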
1963 static pmd_t *walk_to_pmd(struct mm_struct *m !! 1678 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
                                                   >> 1679                         spinlock_t **ptl)
1964 {                                                1680 {
1965         pgd_t *pgd;                              1681         pgd_t *pgd;
1966         p4d_t *p4d;                              1682         p4d_t *p4d;
1967         pud_t *pud;                              1683         pud_t *pud;
1968         pmd_t *pmd;                              1684         pmd_t *pmd;
1969                                                  1685 
1970         pgd = pgd_offset(mm, addr);              1686         pgd = pgd_offset(mm, addr);
1971         p4d = p4d_alloc(mm, pgd, addr);          1687         p4d = p4d_alloc(mm, pgd, addr);
1972         if (!p4d)                                1688         if (!p4d)
1973                 return NULL;                     1689                 return NULL;
1974         pud = pud_alloc(mm, p4d, addr);          1690         pud = pud_alloc(mm, p4d, addr);
1975         if (!pud)                                1691         if (!pud)
1976                 return NULL;                     1692                 return NULL;
1977         pmd = pmd_alloc(mm, pud, addr);          1693         pmd = pmd_alloc(mm, pud, addr);
1978         if (!pmd)                                1694         if (!pmd)
1979                 return NULL;                     1695                 return NULL;
1980                                                  1696 
1981         VM_BUG_ON(pmd_trans_huge(*pmd));         1697         VM_BUG_ON(pmd_trans_huge(*pmd));
1982         return pmd;                           << 
1983 }                                             << 
1984                                               << 
1985 pte_t *__get_locked_pte(struct mm_struct *mm, << 
1986                         spinlock_t **ptl)     << 
1987 {                                             << 
1988         pmd_t *pmd = walk_to_pmd(mm, addr);   << 
1989                                               << 
1990         if (!pmd)                             << 
1991                 return NULL;                  << 
1992         return pte_alloc_map_lock(mm, pmd, ad    1698         return pte_alloc_map_lock(mm, pmd, addr, ptl);
1993 }                                                1699 }
1994                                                  1700 
1995 static bool vm_mixed_zeropage_allowed(struct  !! 1701 /*
1996 {                                             !! 1702  * This is the old fallback for page remapping.
1997         VM_WARN_ON_ONCE(vma->vm_flags & VM_PF !! 1703  *
1998         /*                                    !! 1704  * For historical reasons, it only allows reserved pages. Only
1999          * Whoever wants to forbid the zeropa !! 1705  * old drivers should use this, and they needed to mark their
2000          * might already have been mapped has !! 1706  * pages reserved for the old functions anyway.
2001          * bail out on any zeropages. Zeropag !! 1707  */
2002          * be unshared using FAULT_FLAG_UNSHA << 
2003          */                                   << 
2004         if (mm_forbids_zeropage(vma->vm_mm))  << 
2005                 return false;                 << 
2006         /* zeropages in COW mappings are comm << 
2007         if (is_cow_mapping(vma->vm_flags))    << 
2008                 return true;                  << 
2009         /* Mappings that do not allow for wri << 
2010         if (!(vma->vm_flags & (VM_WRITE | VM_ << 
2011                 return true;                  << 
2012         /*                                    << 
2013          * Why not allow any VMA that has vm_ << 
2014          * find the shared zeropage and longt << 
2015          * be problematic as soon as the zero << 
2016          * page due to vma->vm_ops->pfn_mkwri << 
2017          * now differ to what GUP looked up.  << 
2018          * FOLL_LONGTERM and VM_IO is incompa << 
2019          * check_vma_flags).                  << 
2020          */                                   << 
2021         return vma->vm_ops && vma->vm_ops->pf << 
2022                (vma_is_fsdax(vma) || vma->vm_ << 
2023 }                                             << 
2024                                               << 
2025 static int validate_page_before_insert(struct << 
2026                                        struct << 
2027 {                                             << 
2028         struct folio *folio = page_folio(page << 
2029                                               << 
2030         if (!folio_ref_count(folio))          << 
2031                 return -EINVAL;               << 
2032         if (unlikely(is_zero_folio(folio))) { << 
2033                 if (!vm_mixed_zeropage_allowe << 
2034                         return -EINVAL;       << 
2035                 return 0;                     << 
2036         }                                     << 
2037         if (folio_test_anon(folio) || folio_t << 
2038             page_has_type(page))              << 
2039                 return -EINVAL;               << 
2040         flush_dcache_folio(folio);            << 
2041         return 0;                             << 
2042 }                                             << 
2043                                               << 
2044 static int insert_page_into_pte_locked(struct << 
2045                         unsigned long addr, s << 
2046 {                                             << 
2047         struct folio *folio = page_folio(page << 
2048         pte_t pteval;                         << 
2049                                               << 
2050         if (!pte_none(ptep_get(pte)))         << 
2051                 return -EBUSY;                << 
2052         /* Ok, finally just insert the thing. << 
2053         pteval = mk_pte(page, prot);          << 
2054         if (unlikely(is_zero_folio(folio))) { << 
2055                 pteval = pte_mkspecial(pteval << 
2056         } else {                              << 
2057                 folio_get(folio);             << 
2058                 inc_mm_counter(vma->vm_mm, mm << 
2059                 folio_add_file_rmap_pte(folio << 
2060         }                                     << 
2061         set_pte_at(vma->vm_mm, addr, pte, pte << 
2062         return 0;                             << 
2063 }                                             << 
2064                                               << 
2065 static int insert_page(struct vm_area_struct     1708 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
2066                         struct page *page, pg    1709                         struct page *page, pgprot_t prot)
2067 {                                                1710 {
                                                   >> 1711         struct mm_struct *mm = vma->vm_mm;
2068         int retval;                              1712         int retval;
2069         pte_t *pte;                              1713         pte_t *pte;
2070         spinlock_t *ptl;                         1714         spinlock_t *ptl;
2071                                                  1715 
2072         retval = validate_page_before_insert( !! 1716         retval = -EINVAL;
2073         if (retval)                           !! 1717         if (PageAnon(page))
2074                 goto out;                        1718                 goto out;
2075         retval = -ENOMEM;                        1719         retval = -ENOMEM;
2076         pte = get_locked_pte(vma->vm_mm, addr !! 1720         flush_dcache_page(page);
                                                   >> 1721         pte = get_locked_pte(mm, addr, &ptl);
2077         if (!pte)                                1722         if (!pte)
2078                 goto out;                        1723                 goto out;
2079         retval = insert_page_into_pte_locked( !! 1724         retval = -EBUSY;
2080         pte_unmap_unlock(pte, ptl);           !! 1725         if (!pte_none(*pte))
2081 out:                                          !! 1726                 goto out_unlock;
2082         return retval;                        << 
2083 }                                             << 
2084                                               << 
2085 static int insert_page_in_batch_locked(struct << 
2086                         unsigned long addr, s << 
2087 {                                             << 
2088         int err;                              << 
2089                                               << 
2090         err = validate_page_before_insert(vma << 
2091         if (err)                              << 
2092                 return err;                   << 
2093         return insert_page_into_pte_locked(vm << 
2094 }                                             << 
2095                                               << 
2096 /* insert_pages() amortizes the cost of spinl << 
2097  * when inserting pages in a loop.            << 
2098  */                                           << 
2099 static int insert_pages(struct vm_area_struct << 
2100                         struct page **pages,  << 
2101 {                                             << 
2102         pmd_t *pmd = NULL;                    << 
2103         pte_t *start_pte, *pte;               << 
2104         spinlock_t *pte_lock;                 << 
2105         struct mm_struct *const mm = vma->vm_ << 
2106         unsigned long curr_page_idx = 0;      << 
2107         unsigned long remaining_pages_total = << 
2108         unsigned long pages_to_write_in_pmd;  << 
2109         int ret;                              << 
2110 more:                                         << 
2111         ret = -EFAULT;                        << 
2112         pmd = walk_to_pmd(mm, addr);          << 
2113         if (!pmd)                             << 
2114                 goto out;                     << 
2115                                               << 
2116         pages_to_write_in_pmd = min_t(unsigne << 
2117                 remaining_pages_total, PTRS_P << 
2118                                                  1727 
2119         /* Allocate the PTE if necessary; tak !! 1728         /* Ok, finally just insert the thing.. */
2120         ret = -ENOMEM;                        !! 1729         get_page(page);
2121         if (pte_alloc(mm, pmd))               !! 1730         inc_mm_counter_fast(mm, mm_counter_file(page));
2122                 goto out;                     !! 1731         page_add_file_rmap(page, false);
                                                   >> 1732         set_pte_at(mm, addr, pte, mk_pte(page, prot));
2123                                                  1733 
2124         while (pages_to_write_in_pmd) {       !! 1734         retval = 0;
2125                 int pte_idx = 0;              !! 1735         pte_unmap_unlock(pte, ptl);
2126                 const int batch_size = min_t( !! 1736         return retval;
2127                                               !! 1737 out_unlock:
2128                 start_pte = pte_offset_map_lo !! 1738         pte_unmap_unlock(pte, ptl);
2129                 if (!start_pte) {             << 
2130                         ret = -EFAULT;        << 
2131                         goto out;             << 
2132                 }                             << 
2133                 for (pte = start_pte; pte_idx << 
2134                         int err = insert_page << 
2135                                 addr, pages[c << 
2136                         if (unlikely(err)) {  << 
2137                                 pte_unmap_unl << 
2138                                 ret = err;    << 
2139                                 remaining_pag << 
2140                                 goto out;     << 
2141                         }                     << 
2142                         addr += PAGE_SIZE;    << 
2143                         ++curr_page_idx;      << 
2144                 }                             << 
2145                 pte_unmap_unlock(start_pte, p << 
2146                 pages_to_write_in_pmd -= batc << 
2147                 remaining_pages_total -= batc << 
2148         }                                     << 
2149         if (remaining_pages_total)            << 
2150                 goto more;                    << 
2151         ret = 0;                              << 
2152 out:                                             1739 out:
2153         *num = remaining_pages_total;         !! 1740         return retval;
2154         return ret;                           << 
2155 }                                             << 
2156                                               << 
2157 /**                                           << 
2158  * vm_insert_pages - insert multiple pages in << 
2159  * @vma: user vma to map to                   << 
2160  * @addr: target start user address of these  << 
2161  * @pages: source kernel pages                << 
2162  * @num: in: number of pages to map. out: num << 
2163  * mapped. (0 means all pages were successful << 
2164  *                                            << 
2165  * Preferred over vm_insert_page() when inser << 
2166  *                                            << 
2167  * In case of error, we may have mapped a sub << 
2168  * pages. It is the caller's responsibility t << 
2169  *                                            << 
2170  * The same restrictions apply as in vm_inser << 
2171  */                                           << 
2172 int vm_insert_pages(struct vm_area_struct *vm << 
2173                         struct page **pages,  << 
2174 {                                             << 
2175         const unsigned long end_addr = addr + << 
2176                                               << 
2177         if (addr < vma->vm_start || end_addr  << 
2178                 return -EFAULT;               << 
2179         if (!(vma->vm_flags & VM_MIXEDMAP)) { << 
2180                 BUG_ON(mmap_read_trylock(vma- << 
2181                 BUG_ON(vma->vm_flags & VM_PFN << 
2182                 vm_flags_set(vma, VM_MIXEDMAP << 
2183         }                                     << 
2184         /* Defer page refcount checking till  << 
2185         return insert_pages(vma, addr, pages, << 
2186 }                                                1741 }
2187 EXPORT_SYMBOL(vm_insert_pages);               << 
2188                                                  1742 
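vm_insert_pages() amortises the page-table lock across many insertions, so a driver mapping a large buffer can replace a vm_insert_page() loop with one call. A hedged sketch of an mmap handler; struct mydev_buf, buf->pages and buf->page_count are hypothetical driver bookkeeping, and on error *num reports how many pages were still unmapped:

static int mydev_mmap_batched(struct file *file, struct vm_area_struct *vma)
{
        struct mydev_buf *buf = file->private_data;     /* hypothetical */
        unsigned long num = vma_pages(vma);

        if (num > buf->page_count)
                return -EINVAL;

        /* On failure, the last 'num' pages of the range were not mapped;
         * the caller's error path tears down whatever was inserted. */
        return vm_insert_pages(vma, vma->vm_start, buf->pages, &num);
}
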
2189 /**                                              1743 /**
2190  * vm_insert_page - insert single page into u    1744  * vm_insert_page - insert single page into user vma
2191  * @vma: user vma to map to                      1745  * @vma: user vma to map to
2192  * @addr: target user address of this page       1746  * @addr: target user address of this page
2193  * @page: source kernel page                     1747  * @page: source kernel page
2194  *                                               1748  *
2195  * This allows drivers to insert individual p    1749  * This allows drivers to insert individual pages they've allocated
2196  * into a user vma. The zeropage is supported !! 1750  * into a user vma.
2197  * see vm_mixed_zeropage_allowed().           << 
2198  *                                               1751  *
2199  * The page has to be a nice clean _individua    1752  * The page has to be a nice clean _individual_ kernel allocation.
2200  * If you allocate a compound page, you need     1753  * If you allocate a compound page, you need to have marked it as
2201  * such (__GFP_COMP), or manually just split     1754  * such (__GFP_COMP), or manually just split the page up yourself
2202  * (see split_page()).                           1755  * (see split_page()).
2203  *                                               1756  *
2204  * NOTE! Traditionally this was done with "re    1757  * NOTE! Traditionally this was done with "remap_pfn_range()" which
2205  * took an arbitrary page protection paramete    1758  * took an arbitrary page protection parameter. This doesn't allow
2206  * that. Your vma protection will have to be     1759  * that. Your vma protection will have to be set up correctly, which
2207  * means that if you want a shared writable m    1760  * means that if you want a shared writable mapping, you'd better
2208  * ask for a shared writable mapping!            1761  * ask for a shared writable mapping!
2209  *                                               1762  *
2210  * The page does not need to be reserved.        1763  * The page does not need to be reserved.
2211  *                                               1764  *
2212  * Usually this function is called from f_op-    1765  * Usually this function is called from f_op->mmap() handler
2213  * under mm->mmap_lock write-lock, so it can  !! 1766  * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
2214  * Caller must set VM_MIXEDMAP on vma if it w    1767  * Caller must set VM_MIXEDMAP on vma if it wants to call this
2215  * function from other places, for example fr    1768  * function from other places, for example from page-fault handler.
2216  *                                            << 
2217  * Return: %0 on success, negative error code << 
2218  */                                              1769  */
2219 int vm_insert_page(struct vm_area_struct *vma    1770 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2220                         struct page *page)       1771                         struct page *page)
2221 {                                                1772 {
2222         if (addr < vma->vm_start || addr >= v    1773         if (addr < vma->vm_start || addr >= vma->vm_end)
2223                 return -EFAULT;                  1774                 return -EFAULT;
                                                   >> 1775         if (!page_count(page))
                                                   >> 1776                 return -EINVAL;
2224         if (!(vma->vm_flags & VM_MIXEDMAP)) {    1777         if (!(vma->vm_flags & VM_MIXEDMAP)) {
2225                 BUG_ON(mmap_read_trylock(vma- !! 1778                 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
2226                 BUG_ON(vma->vm_flags & VM_PFN    1779                 BUG_ON(vma->vm_flags & VM_PFNMAP);
2227                 vm_flags_set(vma, VM_MIXEDMAP !! 1780                 vma->vm_flags |= VM_MIXEDMAP;
2228         }                                        1781         }
2229         return insert_page(vma, addr, page, v    1782         return insert_page(vma, addr, page, vma->vm_page_prot);
2230 }                                                1783 }
2231 EXPORT_SYMBOL(vm_insert_page);                   1784 EXPORT_SYMBOL(vm_insert_page);
2232                                                  1785 
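As the comment above says, vm_insert_page() is normally called from an f_op->mmap() handler, with userspace having asked for the mapping type (e.g. shared writable) it actually wants. A hedged sketch of such a handler inserting order-0 pages one by one; struct mydev_buf, buf->pages and buf->page_count are hypothetical driver state:

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct mydev_buf *buf = file->private_data;     /* hypothetical */
        unsigned long i, npages = vma_pages(vma);
        int ret;

        if (npages > buf->page_count)
                return -EINVAL;

        for (i = 0; i < npages; i++) {
                ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
                                     buf->pages[i]);
                if (ret)
                        return ret;
        }
        return 0;
}
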
2233 /*                                            !! 1786 static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2234  * __vm_map_pages - maps range of kernel page << 
2235  * @vma: user vma to map to                   << 
2236  * @pages: pointer to array of source kernel  << 
2237  * @num: number of pages in page array        << 
2238  * @offset: user's requested vm_pgoff         << 
2239  *                                            << 
2240  * This allows drivers to map range of kernel << 
2241  * The zeropage is supported in some VMAs, se << 
2242  * vm_mixed_zeropage_allowed().               << 
2243  *                                            << 
2244  * Return: 0 on success and error code otherw << 
2245  */                                           << 
2246 static int __vm_map_pages(struct vm_area_stru << 
2247                                 unsigned long << 
2248 {                                             << 
2249         unsigned long count = vma_pages(vma); << 
2250         unsigned long uaddr = vma->vm_start;  << 
2251         int ret, i;                           << 
2252                                               << 
2253         /* Fail if the user requested offset  << 
2254         if (offset >= num)                    << 
2255                 return -ENXIO;                << 
2256                                               << 
2257         /* Fail if the user requested size ex << 
2258         if (count > num - offset)             << 
2259                 return -ENXIO;                << 
2260                                               << 
2261         for (i = 0; i < count; i++) {         << 
2262                 ret = vm_insert_page(vma, uad << 
2263                 if (ret < 0)                  << 
2264                         return ret;           << 
2265                 uaddr += PAGE_SIZE;           << 
2266         }                                     << 
2267                                               << 
2268         return 0;                             << 
2269 }                                             << 
2270                                               << 
2271 /**                                           << 
2272  * vm_map_pages - maps range of kernel pages  << 
2273  * @vma: user vma to map to                   << 
2274  * @pages: pointer to array of source kernel  << 
2275  * @num: number of pages in page array        << 
2276  *                                            << 
2277  * Maps an object consisting of @num pages, c << 
2278  * requested vm_pgoff                         << 
2279  *                                            << 
2280  * If we fail to insert any page into the vma << 
2281  * immediately leaving any previously inserte << 
2282  * from the mmap handler may immediately retu << 
2283  * will destroy the vma, removing any success << 
2284  * callers should make their own arrangements << 
2285  *                                            << 
2286  * Context: Process context. Called by mmap h << 
2287  * Return: 0 on success and error code otherw << 
2288  */                                           << 
2289 int vm_map_pages(struct vm_area_struct *vma,  << 
2290                                 unsigned long << 
2291 {                                             << 
2292         return __vm_map_pages(vma, pages, num << 
2293 }                                             << 
2294 EXPORT_SYMBOL(vm_map_pages);                  << 
2295                                               << 
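vm_map_pages() honours the offset userspace passed to mmap() (vma->vm_pgoff), so a driver can expose one pinned buffer and let userspace map a window of it. A hedged sketch; struct mydev_buf, buf->pages and buf->page_count are hypothetical:

static int mydev_mmap_window(struct file *file, struct vm_area_struct *vma)
{
        struct mydev_buf *buf = file->private_data;     /* hypothetical */

        /* Returns -ENXIO if vm_pgoff or the requested size reaches past
         * the buf->page_count pages backing the object. */
        return vm_map_pages(vma, buf->pages, buf->page_count);
}
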
2296 /**                                           << 
2297  * vm_map_pages_zero - map range of kernel pa << 
2298  * @vma: user vma to map to                   << 
2299  * @pages: pointer to array of source kernel  << 
2300  * @num: number of pages in page array        << 
2301  *                                            << 
2302  * Similar to vm_map_pages(), except that it  << 
2303  * to 0. This function is intended for the dr << 
2304  * vm_pgoff.                                  << 
2305  *                                            << 
2306  * Context: Process context. Called by mmap h << 
2307  * Return: 0 on success and error code otherw << 
2308  */                                           << 
2309 int vm_map_pages_zero(struct vm_area_struct * << 
2310                                 unsigned long << 
2311 {                                             << 
2312         return __vm_map_pages(vma, pages, num << 
2313 }                                             << 
2314 EXPORT_SYMBOL(vm_map_pages_zero);             << 
2315                                               << 
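vm_map_pages_zero() is for drivers that use vm_pgoff as a buffer selector (a "fake" offset) rather than as an offset into the pages array, so the offset must not be applied again here. A hedged sketch; mydev_lookup_buf() and the buf fields are hypothetical:

static int mydev_mmap_indexed(struct file *file, struct vm_area_struct *vma)
{
        /* vm_pgoff selects which buffer to map, not where to start in it. */
        struct mydev_buf *buf = mydev_lookup_buf(file, vma->vm_pgoff);  /* hypothetical */

        if (!buf)
                return -EINVAL;
        return vm_map_pages_zero(vma, buf->pages, buf->page_count);
}
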
2316 static vm_fault_t insert_pfn(struct vm_area_s << 
2317                         pfn_t pfn, pgprot_t p    1787                         pfn_t pfn, pgprot_t prot, bool mkwrite)
2318 {                                                1788 {
2319         struct mm_struct *mm = vma->vm_mm;       1789         struct mm_struct *mm = vma->vm_mm;
                                                   >> 1790         int retval;
2320         pte_t *pte, entry;                       1791         pte_t *pte, entry;
2321         spinlock_t *ptl;                         1792         spinlock_t *ptl;
2322                                                  1793 
                                                   >> 1794         retval = -ENOMEM;
2323         pte = get_locked_pte(mm, addr, &ptl);    1795         pte = get_locked_pte(mm, addr, &ptl);
2324         if (!pte)                                1796         if (!pte)
2325                 return VM_FAULT_OOM;          !! 1797                 goto out;
2326         entry = ptep_get(pte);                !! 1798         retval = -EBUSY;
2327         if (!pte_none(entry)) {               !! 1799         if (!pte_none(*pte)) {
2328                 if (mkwrite) {                   1800                 if (mkwrite) {
2329                         /*                       1801                         /*
2330                          * For read faults on    1802                          * For read faults on private mappings the PFN passed
2331                          * in may not match t    1803                          * in may not match the PFN we have mapped if the
2332                          * mapped PFN is a wr    1804                          * mapped PFN is a writeable COW page.  In the mkwrite
2333                          * case we are creati    1805                          * case we are creating a writable PTE for a shared
2334                          * mapping and we exp !! 1806                          * mapping and we expect the PFNs to match.
2335                          * don't match, we ar << 
2336                          * allocation and map << 
2337                          * update.            << 
2338                          */                      1807                          */
2339                         if (pte_pfn(entry) != !! 1808                         if (WARN_ON_ONCE(pte_pfn(*pte) != pfn_t_to_pfn(pfn)))
2340                                 WARN_ON_ONCE( << 
2341                                 goto out_unlo    1809                                 goto out_unlock;
2342                         }                     !! 1810                         entry = *pte;
2343                         entry = pte_mkyoung(e !! 1811                         goto out_mkwrite;
2344                         entry = maybe_mkwrite !! 1812                 } else
2345                         if (ptep_set_access_f !! 1813                         goto out_unlock;
2346                                 update_mmu_ca << 
2347                 }                             << 
2348                 goto out_unlock;              << 
2349         }                                        1814         }
2350                                                  1815 
2351         /* Ok, finally just insert the thing.    1816         /* Ok, finally just insert the thing.. */
2352         if (pfn_t_devmap(pfn))                   1817         if (pfn_t_devmap(pfn))
2353                 entry = pte_mkdevmap(pfn_t_pt    1818                 entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2354         else                                     1819         else
2355                 entry = pte_mkspecial(pfn_t_p    1820                 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2356                                                  1821 
                                                   >> 1822 out_mkwrite:
2357         if (mkwrite) {                           1823         if (mkwrite) {
2358                 entry = pte_mkyoung(entry);      1824                 entry = pte_mkyoung(entry);
2359                 entry = maybe_mkwrite(pte_mkd    1825                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2360         }                                        1826         }
2361                                                  1827 
2362         set_pte_at(mm, addr, pte, entry);        1828         set_pte_at(mm, addr, pte, entry);
2363         update_mmu_cache(vma, addr, pte); /*     1829         update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2364                                                  1830 
                                                   >> 1831         retval = 0;
2365 out_unlock:                                      1832 out_unlock:
2366         pte_unmap_unlock(pte, ptl);              1833         pte_unmap_unlock(pte, ptl);
2367         return VM_FAULT_NOPAGE;               !! 1834 out:
                                                   >> 1835         return retval;
2368 }                                                1836 }
2369                                                  1837 
2370 /**                                              1838 /**
2371  * vmf_insert_pfn_prot - insert single pfn in !! 1839  * vm_insert_pfn - insert single pfn into user vma
                                                   >> 1840  * @vma: user vma to map to
                                                   >> 1841  * @addr: target user address of this page
                                                   >> 1842  * @pfn: source kernel pfn
                                                   >> 1843  *
                                                   >> 1844  * Similar to vm_insert_page, this allows drivers to insert individual pages
                                                   >> 1845  * they've allocated into a user vma. Same comments apply.
                                                   >> 1846  *
                                                   >> 1847  * This function should only be called from a vm_ops->fault handler, and
                                                   >> 1848  * in that case the handler should return NULL.
                                                   >> 1849  *
                                                   >> 1850  * vma cannot be a COW mapping.
                                                   >> 1851  *
                                                   >> 1852  * As this is called only for pages that do not currently exist, we
                                                   >> 1853  * do not need to flush old virtual caches or the TLB.
                                                   >> 1854  */
                                                   >> 1855 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                                                   >> 1856                         unsigned long pfn)
                                                   >> 1857 {
                                                   >> 1858         return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
                                                   >> 1859 }
                                                   >> 1860 EXPORT_SYMBOL(vm_insert_pfn);
                                                   >> 1861 
                                                   >> 1862 /**
                                                   >> 1863  * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2372  * @vma: user vma to map to                      1864  * @vma: user vma to map to
2373  * @addr: target user address of this page       1865  * @addr: target user address of this page
2374  * @pfn: source kernel pfn                       1866  * @pfn: source kernel pfn
2375  * @pgprot: pgprot flags for the inserted pag    1867  * @pgprot: pgprot flags for the inserted page
2376  *                                               1868  *
2377  * This is exactly like vmf_insert_pfn(), exc !! 1869  * This is exactly like vm_insert_pfn, except that it allows drivers to
2378  * to override pgprot on a per-page basis.       1870  * to override pgprot on a per-page basis.
2379  *                                               1871  *
2380  * This only makes sense for IO mappings, and    1872  * This only makes sense for IO mappings, and it makes no sense for
2381  * COW mappings.  In general, using multiple  !! 1873  * cow mappings.  In general, using multiple vmas is preferable;
2382  * vmf_insert_pfn_prot should only be used if !! 1874  * vm_insert_pfn_prot should only be used if using multiple VMAs is
2383  * impractical.                                  1875  * impractical.
2384  *                                            << 
2385  * pgprot typically only differs from @vma->v << 
2386  * caching- and encryption bits different tha << 
2387  * because the caching- or encryption mode ma << 
2388  *                                            << 
2389  * This is ok as long as @vma->vm_page_prot i << 
2390  * to set caching and encryption bits for tho << 
2391  * This is ensured by core vm only modifying  << 
2392  * functions that don't touch caching- or enc << 
2393  * if needed. (See for example mprotect()).   << 
2394  *                                            << 
2395  * Also when new page-table entries are creat << 
2396  * fault() callback, and never using the valu << 
2397  * except for page-table entries that point t << 
2398  * of COW.                                    << 
2399  *                                            << 
2400  * Context: Process context.  May allocate us << 
2401  * Return: vm_fault_t value.                  << 
2402  */                                              1876  */
2403 vm_fault_t vmf_insert_pfn_prot(struct vm_area !! 1877 int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2404                         unsigned long pfn, pg    1878                         unsigned long pfn, pgprot_t pgprot)
2405 {                                                1879 {
                                                   >> 1880         int ret;
2406         /*                                       1881         /*
2407          * Technically, architectures with pt    1882          * Technically, architectures with pte_special can avoid all these
2408          * restrictions (same for remap_pfn_r    1883          * restrictions (same for remap_pfn_range).  However we would like
2409          * consistency in testing and feature    1884          * consistency in testing and feature parity among all, so we should
2410          * try to keep these invariants in pl    1885          * try to keep these invariants in place for everybody.
2411          */                                      1886          */
2412         BUG_ON(!(vma->vm_flags & (VM_PFNMAP|V    1887         BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2413         BUG_ON((vma->vm_flags & (VM_PFNMAP|VM    1888         BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2414                                                  1889                                                 (VM_PFNMAP|VM_MIXEDMAP));
2415         BUG_ON((vma->vm_flags & VM_PFNMAP) &&    1890         BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2416         BUG_ON((vma->vm_flags & VM_MIXEDMAP)     1891         BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2417                                                  1892 
2418         if (addr < vma->vm_start || addr >= v    1893         if (addr < vma->vm_start || addr >= vma->vm_end)
2419                 return VM_FAULT_SIGBUS;       !! 1894                 return -EFAULT;
2420                                                  1895 
2421         if (!pfn_modify_allowed(pfn, pgprot))    1896         if (!pfn_modify_allowed(pfn, pgprot))
2422                 return VM_FAULT_SIGBUS;       !! 1897                 return -EACCES;
2423                                                  1898 
2424         track_pfn_insert(vma, &pgprot, __pfn_    1899         track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2425                                                  1900 
2426         return insert_pfn(vma, addr, __pfn_to !! 1901         ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2427                         false);                  1902                         false);
2428 }                                             << 
2429 EXPORT_SYMBOL(vmf_insert_pfn_prot);           << 
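/*
 * [Editor's sketch, not part of mm/memory.c] One way a driver fault handler
 * might use vmf_insert_pfn_prot() to map device memory with a per-page
 * protection (here write-combining) while the rest of the VMA keeps
 * vma->vm_page_prot.  "struct my_wc_dev", its phys_base field and the
 * vm_private_data convention are assumptions for the example only.
 */
#include <linux/mm.h>
#include <linux/pgtable.h>

struct my_wc_dev {
	phys_addr_t phys_base;		/* base of the device aperture (assumed) */
};

static vm_fault_t my_wc_dev_fault(struct vm_fault *vmf)
{
	struct my_wc_dev *dev = vmf->vma->vm_private_data;
	unsigned long pfn = (dev->phys_base >> PAGE_SHIFT) + vmf->pgoff;

	/* Per-page pgprot override, as described in the comment above. */
	return vmf_insert_pfn_prot(vmf->vma, vmf->address, pfn,
				   pgprot_writecombine(vmf->vma->vm_page_prot));
}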
2430                                                  1903 
2431 /**                                           !! 1904         return ret;
2432  * vmf_insert_pfn - insert single pfn into us << 
2433  * @vma: user vma to map to                   << 
2434  * @addr: target user address of this page    << 
2435  * @pfn: source kernel pfn                    << 
2436  *                                            << 
2437  * Similar to vm_insert_page, this allows dri << 
2438  * they've allocated into a user vma. Same co << 
2439  *                                            << 
2440  * This function should only be called from a << 
2441  * in that case the handler should return the << 
2442  *                                            << 
2443  * vma cannot be a COW mapping.               << 
2444  *                                            << 
2445  * As this is called only for pages that do n << 
2446  * do not need to flush old virtual caches or << 
2447  *                                            << 
2448  * Context: Process context.  May allocate us << 
2449  * Return: vm_fault_t value.                  << 
2450  */                                           << 
2451 vm_fault_t vmf_insert_pfn(struct vm_area_stru << 
2452                         unsigned long pfn)    << 
2453 {                                             << 
2454         return vmf_insert_pfn_prot(vma, addr, << 
2455 }                                                1905 }
2456 EXPORT_SYMBOL(vmf_insert_pfn);                !! 1906 EXPORT_SYMBOL(vm_insert_pfn_prot);
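/*
 * [Editor's sketch, not part of mm/memory.c] The usual shape of a PFN-mapping
 * driver: mmap() marks the VMA VM_PFNMAP and the fault handler returns
 * whatever vmf_insert_pfn() returns (VM_FAULT_NOPAGE on success).
 * my_dev_pfn() is an assumed helper; vm_flags_set() is the recent-kernel way
 * to update vm_flags (older code assigned vma->vm_flags directly).
 */
#include <linux/fs.h>
#include <linux/mm.h>

static unsigned long my_dev_pfn(struct vm_area_struct *vma, pgoff_t pgoff); /* assumed */

static vm_fault_t my_dev_fault(struct vm_fault *vmf)
{
	return vmf_insert_pfn(vmf->vma, vmf->address,
			      my_dev_pfn(vmf->vma, vmf->pgoff));
}

static const struct vm_operations_struct my_dev_vm_ops = {
	.fault = my_dev_fault,
};

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_ops = &my_dev_vm_ops;
	return 0;
}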
2457                                                  1907 
2458 static bool vm_mixed_ok(struct vm_area_struct !! 1908 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2459 {                                                1909 {
2460         if (unlikely(is_zero_pfn(pfn_t_to_pfn << 
2461             (mkwrite || !vm_mixed_zeropage_al << 
2462                 return false;                 << 
2463         /* these checks mirror the abort cond    1910         /* these checks mirror the abort conditions in vm_normal_page */
2464         if (vma->vm_flags & VM_MIXEDMAP)         1911         if (vma->vm_flags & VM_MIXEDMAP)
2465                 return true;                     1912                 return true;
2466         if (pfn_t_devmap(pfn))                   1913         if (pfn_t_devmap(pfn))
2467                 return true;                     1914                 return true;
2468         if (pfn_t_special(pfn))                  1915         if (pfn_t_special(pfn))
2469                 return true;                     1916                 return true;
2470         if (is_zero_pfn(pfn_t_to_pfn(pfn)))      1917         if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2471                 return true;                     1918                 return true;
2472         return false;                            1919         return false;
2473 }                                                1920 }
2474                                                  1921 
2475 static vm_fault_t __vm_insert_mixed(struct vm !! 1922 static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2476                 unsigned long addr, pfn_t pfn !! 1923                         pfn_t pfn, bool mkwrite)
2477 {                                                1924 {
2478         pgprot_t pgprot = vma->vm_page_prot;     1925         pgprot_t pgprot = vma->vm_page_prot;
2479         int err;                              << 
2480                                                  1926 
2481         if (!vm_mixed_ok(vma, pfn, mkwrite))  !! 1927         BUG_ON(!vm_mixed_ok(vma, pfn));
2482                 return VM_FAULT_SIGBUS;       << 
2483                                                  1928 
2484         if (addr < vma->vm_start || addr >= v    1929         if (addr < vma->vm_start || addr >= vma->vm_end)
2485                 return VM_FAULT_SIGBUS;       !! 1930                 return -EFAULT;
2486                                                  1931 
2487         track_pfn_insert(vma, &pgprot, pfn);     1932         track_pfn_insert(vma, &pgprot, pfn);
2488                                                  1933 
2489         if (!pfn_modify_allowed(pfn_t_to_pfn(    1934         if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2490                 return VM_FAULT_SIGBUS;       !! 1935                 return -EACCES;
2491                                                  1936 
2492         /*                                       1937         /*
2493          * If we don't have pte special, then    1938          * If we don't have pte special, then we have to use the pfn_valid()
2494          * based VM_MIXEDMAP scheme (see vm_n    1939          * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2495          * refcount the page if pfn_valid is     1940          * refcount the page if pfn_valid is true (hence insert_page rather
2496          * than insert_pfn).  If a zero_pfn w    1941          * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
2497          * without pte special, it would ther    1942          * without pte special, it would there be refcounted as a normal page.
2498          */                                      1943          */
2499         if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_S    1944         if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2500             !pfn_t_devmap(pfn) && pfn_t_valid    1945             !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2501                 struct page *page;               1946                 struct page *page;
2502                                                  1947 
2503                 /*                               1948                 /*
2504                  * At this point we are commi    1949                  * At this point we are committed to insert_page()
2505                  * regardless of whether the     1950                  * regardless of whether the caller specified flags that
2506                  * result in pfn_t_has_page()    1951                  * result in pfn_t_has_page() == false.
2507                  */                              1952                  */
2508                 page = pfn_to_page(pfn_t_to_p    1953                 page = pfn_to_page(pfn_t_to_pfn(pfn));
2509                 err = insert_page(vma, addr,  !! 1954                 return insert_page(vma, addr, page, pgprot);
2510         } else {                              << 
2511                 return insert_pfn(vma, addr,  << 
2512         }                                        1955         }
2513                                               !! 1956         return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2514         if (err == -ENOMEM)                   << 
2515                 return VM_FAULT_OOM;          << 
2516         if (err < 0 && err != -EBUSY)         << 
2517                 return VM_FAULT_SIGBUS;       << 
2518                                               << 
2519         return VM_FAULT_NOPAGE;               << 
2520 }                                                1957 }
2521                                                  1958 
2522 vm_fault_t vmf_insert_mixed(struct vm_area_st !! 1959 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2523                 pfn_t pfn)                    !! 1960                         pfn_t pfn)
2524 {                                                1961 {
2525         return __vm_insert_mixed(vma, addr, p    1962         return __vm_insert_mixed(vma, addr, pfn, false);
                                                   >> 1963 
2526 }                                                1964 }
2527 EXPORT_SYMBOL(vmf_insert_mixed);              !! 1965 EXPORT_SYMBOL(vm_insert_mixed);
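/*
 * [Editor's sketch, not part of mm/memory.c] A VM_MIXEDMAP-style fault
 * handler handing vmf_insert_mixed() a pfn_t, and letting the core choose
 * between insert_page() (refcounted struct page) and insert_pfn() based on
 * the pfn_t flags and CONFIG_ARCH_HAS_PTE_SPECIAL, as explained in
 * __vm_insert_mixed() above.  my_mixed_pfn() is an assumed helper.
 */
#include <linux/pfn_t.h>

static unsigned long my_mixed_pfn(struct vm_area_struct *vma, pgoff_t pgoff); /* assumed */

static vm_fault_t my_mixed_fault(struct vm_fault *vmf)
{
	return vmf_insert_mixed(vmf->vma, vmf->address,
				__pfn_to_pfn_t(my_mixed_pfn(vmf->vma, vmf->pgoff),
					       PFN_DEV));
}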
2528                                                  1966 
2529 /*                                               1967 /*
2530  *  If the insertion of PTE failed because so    1968  *  If the insertion of PTE failed because someone else already added a
2531  *  different entry in the mean time, we trea    1969  *  different entry in the mean time, we treat that as success as we assume
2532  *  the same entry was actually inserted.        1970  *  the same entry was actually inserted.
2533  */                                              1971  */
                                                   >> 1972 
2534 vm_fault_t vmf_insert_mixed_mkwrite(struct vm    1973 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2535                 unsigned long addr, pfn_t pfn    1974                 unsigned long addr, pfn_t pfn)
2536 {                                                1975 {
2537         return __vm_insert_mixed(vma, addr, p !! 1976         int err;
                                                   >> 1977 
                                                   >> 1978         err =  __vm_insert_mixed(vma, addr, pfn, true);
                                                   >> 1979         if (err == -ENOMEM)
                                                   >> 1980                 return VM_FAULT_OOM;
                                                   >> 1981         if (err < 0 && err != -EBUSY)
                                                   >> 1982                 return VM_FAULT_SIGBUS;
                                                   >> 1983         return VM_FAULT_NOPAGE;
2538 }                                                1984 }
                                                   >> 1985 EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
2539                                                  1986 
2540 /*                                               1987 /*
2541  * maps a range of physical memory into the r    1988  * maps a range of physical memory into the requested pages. the old
2542  * mappings are removed. any references to no    1989  * mappings are removed. any references to nonexistent pages results
2543  * in null mappings (currently treated as "co    1990  * in null mappings (currently treated as "copy-on-access")
2544  */                                              1991  */
2545 static int remap_pte_range(struct mm_struct *    1992 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2546                         unsigned long addr, u    1993                         unsigned long addr, unsigned long end,
2547                         unsigned long pfn, pg    1994                         unsigned long pfn, pgprot_t prot)
2548 {                                                1995 {
2549         pte_t *pte, *mapped_pte;              !! 1996         pte_t *pte;
2550         spinlock_t *ptl;                         1997         spinlock_t *ptl;
2551         int err = 0;                             1998         int err = 0;
2552                                                  1999 
2553         mapped_pte = pte = pte_alloc_map_lock !! 2000         pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2554         if (!pte)                                2001         if (!pte)
2555                 return -ENOMEM;                  2002                 return -ENOMEM;
2556         arch_enter_lazy_mmu_mode();              2003         arch_enter_lazy_mmu_mode();
2557         do {                                     2004         do {
2558                 BUG_ON(!pte_none(ptep_get(pte !! 2005                 BUG_ON(!pte_none(*pte));
2559                 if (!pfn_modify_allowed(pfn,     2006                 if (!pfn_modify_allowed(pfn, prot)) {
2560                         err = -EACCES;           2007                         err = -EACCES;
2561                         break;                   2008                         break;
2562                 }                                2009                 }
2563                 set_pte_at(mm, addr, pte, pte    2010                 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2564                 pfn++;                           2011                 pfn++;
2565         } while (pte++, addr += PAGE_SIZE, ad    2012         } while (pte++, addr += PAGE_SIZE, addr != end);
2566         arch_leave_lazy_mmu_mode();              2013         arch_leave_lazy_mmu_mode();
2567         pte_unmap_unlock(mapped_pte, ptl);    !! 2014         pte_unmap_unlock(pte - 1, ptl);
2568         return err;                              2015         return err;
2569 }                                                2016 }
2570                                                  2017 
2571 static inline int remap_pmd_range(struct mm_s    2018 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2572                         unsigned long addr, u    2019                         unsigned long addr, unsigned long end,
2573                         unsigned long pfn, pg    2020                         unsigned long pfn, pgprot_t prot)
2574 {                                                2021 {
2575         pmd_t *pmd;                              2022         pmd_t *pmd;
2576         unsigned long next;                      2023         unsigned long next;
2577         int err;                                 2024         int err;
2578                                                  2025 
2579         pfn -= addr >> PAGE_SHIFT;               2026         pfn -= addr >> PAGE_SHIFT;
2580         pmd = pmd_alloc(mm, pud, addr);          2027         pmd = pmd_alloc(mm, pud, addr);
2581         if (!pmd)                                2028         if (!pmd)
2582                 return -ENOMEM;                  2029                 return -ENOMEM;
2583         VM_BUG_ON(pmd_trans_huge(*pmd));         2030         VM_BUG_ON(pmd_trans_huge(*pmd));
2584         do {                                     2031         do {
2585                 next = pmd_addr_end(addr, end    2032                 next = pmd_addr_end(addr, end);
2586                 err = remap_pte_range(mm, pmd    2033                 err = remap_pte_range(mm, pmd, addr, next,
2587                                 pfn + (addr >    2034                                 pfn + (addr >> PAGE_SHIFT), prot);
2588                 if (err)                         2035                 if (err)
2589                         return err;              2036                         return err;
2590         } while (pmd++, addr = next, addr !=     2037         } while (pmd++, addr = next, addr != end);
2591         return 0;                                2038         return 0;
2592 }                                                2039 }
2593                                                  2040 
2594 static inline int remap_pud_range(struct mm_s    2041 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2595                         unsigned long addr, u    2042                         unsigned long addr, unsigned long end,
2596                         unsigned long pfn, pg    2043                         unsigned long pfn, pgprot_t prot)
2597 {                                                2044 {
2598         pud_t *pud;                              2045         pud_t *pud;
2599         unsigned long next;                      2046         unsigned long next;
2600         int err;                                 2047         int err;
2601                                                  2048 
2602         pfn -= addr >> PAGE_SHIFT;               2049         pfn -= addr >> PAGE_SHIFT;
2603         pud = pud_alloc(mm, p4d, addr);          2050         pud = pud_alloc(mm, p4d, addr);
2604         if (!pud)                                2051         if (!pud)
2605                 return -ENOMEM;                  2052                 return -ENOMEM;
2606         do {                                     2053         do {
2607                 next = pud_addr_end(addr, end    2054                 next = pud_addr_end(addr, end);
2608                 err = remap_pmd_range(mm, pud    2055                 err = remap_pmd_range(mm, pud, addr, next,
2609                                 pfn + (addr >    2056                                 pfn + (addr >> PAGE_SHIFT), prot);
2610                 if (err)                         2057                 if (err)
2611                         return err;              2058                         return err;
2612         } while (pud++, addr = next, addr !=     2059         } while (pud++, addr = next, addr != end);
2613         return 0;                                2060         return 0;
2614 }                                                2061 }
2615                                                  2062 
2616 static inline int remap_p4d_range(struct mm_s    2063 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2617                         unsigned long addr, u    2064                         unsigned long addr, unsigned long end,
2618                         unsigned long pfn, pg    2065                         unsigned long pfn, pgprot_t prot)
2619 {                                                2066 {
2620         p4d_t *p4d;                              2067         p4d_t *p4d;
2621         unsigned long next;                      2068         unsigned long next;
2622         int err;                                 2069         int err;
2623                                                  2070 
2624         pfn -= addr >> PAGE_SHIFT;               2071         pfn -= addr >> PAGE_SHIFT;
2625         p4d = p4d_alloc(mm, pgd, addr);          2072         p4d = p4d_alloc(mm, pgd, addr);
2626         if (!p4d)                                2073         if (!p4d)
2627                 return -ENOMEM;                  2074                 return -ENOMEM;
2628         do {                                     2075         do {
2629                 next = p4d_addr_end(addr, end    2076                 next = p4d_addr_end(addr, end);
2630                 err = remap_pud_range(mm, p4d    2077                 err = remap_pud_range(mm, p4d, addr, next,
2631                                 pfn + (addr >    2078                                 pfn + (addr >> PAGE_SHIFT), prot);
2632                 if (err)                         2079                 if (err)
2633                         return err;              2080                         return err;
2634         } while (p4d++, addr = next, addr !=     2081         } while (p4d++, addr = next, addr != end);
2635         return 0;                                2082         return 0;
2636 }                                                2083 }
2637                                                  2084 
2638 static int remap_pfn_range_internal(struct vm !! 2085 /**
2639                 unsigned long pfn, unsigned l !! 2086  * remap_pfn_range - remap kernel memory to userspace
                                                   >> 2087  * @vma: user vma to map to
                                                   >> 2088  * @addr: target user address to start at
                                                   >> 2089  * @pfn: physical address of kernel memory
                                                   >> 2090  * @size: size of map area
                                                   >> 2091  * @prot: page protection flags for this mapping
                                                   >> 2092  *
                                                   >> 2093  *  Note: this is only safe if the mm semaphore is held when called.
                                                   >> 2094  */
                                                   >> 2095 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
                                                   >> 2096                     unsigned long pfn, unsigned long size, pgprot_t prot)
2640 {                                                2097 {
2641         pgd_t *pgd;                              2098         pgd_t *pgd;
2642         unsigned long next;                      2099         unsigned long next;
2643         unsigned long end = addr + PAGE_ALIGN    2100         unsigned long end = addr + PAGE_ALIGN(size);
2644         struct mm_struct *mm = vma->vm_mm;       2101         struct mm_struct *mm = vma->vm_mm;
                                                   >> 2102         unsigned long remap_pfn = pfn;
2645         int err;                                 2103         int err;
2646                                                  2104 
2647         if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)) << 
2648                 return -EINVAL;               << 
2649                                               << 
2650         /*                                       2105         /*
2651          * Physically remapped pages are spec    2106          * Physically remapped pages are special. Tell the
2652          * rest of the world about it:           2107          * rest of the world about it:
2653          *   VM_IO tells people not to look a    2108          *   VM_IO tells people not to look at these pages
2654          *      (accesses can have side effec    2109          *      (accesses can have side effects).
2655          *   VM_PFNMAP tells the core MM that    2110          *   VM_PFNMAP tells the core MM that the base pages are just
2656          *      raw PFN mappings, and do not     2111          *      raw PFN mappings, and do not have a "struct page" associated
2657          *      with them.                       2112          *      with them.
2658          *   VM_DONTEXPAND                       2113          *   VM_DONTEXPAND
2659          *      Disable vma merging and expan    2114          *      Disable vma merging and expanding with mremap().
2660          *   VM_DONTDUMP                         2115          *   VM_DONTDUMP
2661          *      Omit vma from core dump, even    2116          *      Omit vma from core dump, even when VM_IO turned off.
2662          *                                       2117          *
2663          * There's a horrible special case to    2118          * There's a horrible special case to handle copy-on-write
2664          * behaviour that some programs depen    2119          * behaviour that some programs depend on. We mark the "original"
2665          * un-COW'ed pages by matching them u    2120          * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2666          * See vm_normal_page() for details.     2121          * See vm_normal_page() for details.
2667          */                                      2122          */
2668         if (is_cow_mapping(vma->vm_flags)) {     2123         if (is_cow_mapping(vma->vm_flags)) {
2669                 if (addr != vma->vm_start ||     2124                 if (addr != vma->vm_start || end != vma->vm_end)
2670                         return -EINVAL;          2125                         return -EINVAL;
2671                 vma->vm_pgoff = pfn;             2126                 vma->vm_pgoff = pfn;
2672         }                                        2127         }
2673                                                  2128 
2674         vm_flags_set(vma, VM_IO | VM_PFNMAP | !! 2129         err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
                                                   >> 2130         if (err)
                                                   >> 2131                 return -EINVAL;
                                                   >> 2132 
                                                   >> 2133         vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
2675                                                  2134 
2676         BUG_ON(addr >= end);                     2135         BUG_ON(addr >= end);
2677         pfn -= addr >> PAGE_SHIFT;               2136         pfn -= addr >> PAGE_SHIFT;
2678         pgd = pgd_offset(mm, addr);              2137         pgd = pgd_offset(mm, addr);
2679         flush_cache_range(vma, addr, end);       2138         flush_cache_range(vma, addr, end);
2680         do {                                     2139         do {
2681                 next = pgd_addr_end(addr, end    2140                 next = pgd_addr_end(addr, end);
2682                 err = remap_p4d_range(mm, pgd    2141                 err = remap_p4d_range(mm, pgd, addr, next,
2683                                 pfn + (addr >    2142                                 pfn + (addr >> PAGE_SHIFT), prot);
2684                 if (err)                         2143                 if (err)
2685                         return err;           !! 2144                         break;
2686         } while (pgd++, addr = next, addr !=     2145         } while (pgd++, addr = next, addr != end);
2687                                                  2146 
2688         return 0;                             << 
2689 }                                             << 
2690                                               << 
2691 /*                                            << 
2692  * Variant of remap_pfn_range that does not c << 
2693  * must have pre-validated the caching bits o << 
2694  */                                           << 
2695 int remap_pfn_range_notrack(struct vm_area_st << 
2696                 unsigned long pfn, unsigned l << 
2697 {                                             << 
2698         int error = remap_pfn_range_internal( << 
2699                                               << 
2700         if (!error)                           << 
2701                 return 0;                     << 
2702                                               << 
2703         /*                                    << 
2704          * A partial pfn range mapping is dan << 
2705          * maintain page reference counts, an << 
2706          * pages due to the error. So zap it  << 
2707          */                                   << 
2708         zap_page_range_single(vma, addr, size << 
2709         return error;                         << 
2710 }                                             << 
2711                                               << 
2712 /**                                           << 
2713  * remap_pfn_range - remap kernel memory to u << 
2714  * @vma: user vma to map to                   << 
2715  * @addr: target page aligned user address to << 
2716  * @pfn: page frame number of kernel physical << 
2717  * @size: size of mapping area                << 
2718  * @prot: page protection flags for this mapp << 
2719  *                                            << 
2720  * Note: this is only safe if the mm semaphor << 
2721  *                                            << 
2722  * Return: %0 on success, negative error code << 
2723  */                                           << 
2724 int remap_pfn_range(struct vm_area_struct *vm << 
2725                     unsigned long pfn, unsign << 
2726 {                                             << 
2727         int err;                              << 
2728                                               << 
2729         err = track_pfn_remap(vma, &prot, pfn << 
2730         if (err)                                 2147         if (err)
2731                 return -EINVAL;               !! 2148                 untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
2732                                                  2149 
2733         err = remap_pfn_range_notrack(vma, ad << 
2734         if (err)                              << 
2735                 untrack_pfn(vma, pfn, PAGE_AL << 
2736         return err;                              2150         return err;
2737 }                                                2151 }
2738 EXPORT_SYMBOL(remap_pfn_range);                  2152 EXPORT_SYMBOL(remap_pfn_range);
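/*
 * [Editor's sketch, not part of mm/memory.c] The classic driver mmap()
 * implementation that maps a whole physical region up front with
 * remap_pfn_range() instead of faulting pages in one at a time.
 * MY_DEV_PHYS_BASE is an assumed device base address; real drivers also
 * validate that vm_pgoff and the requested size stay within the device.
 */
#include <linux/fs.h>
#include <linux/mm.h>

#define MY_DEV_PHYS_BASE 0xfd000000UL	/* assumed for the example */

static int my_dev_mmap_all(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = (MY_DEV_PHYS_BASE >> PAGE_SHIFT) + vma->vm_pgoff;

	/* remap_pfn_range() itself sets VM_IO|VM_PFNMAP|VM_DONTEXPAND|VM_DONTDUMP. */
	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}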
2739                                                  2153 
2740 /**                                              2154 /**
2741  * vm_iomap_memory - remap memory to userspac    2155  * vm_iomap_memory - remap memory to userspace
2742  * @vma: user vma to map to                      2156  * @vma: user vma to map to
2743  * @start: start of the physical memory to be !! 2157  * @start: start of area
2744  * @len: size of area                            2158  * @len: size of area
2745  *                                               2159  *
2746  * This is a simplified io_remap_pfn_range()     2160  * This is a simplified io_remap_pfn_range() for common driver use. The
2747  * driver just needs to give us the physical     2161  * driver just needs to give us the physical memory range to be mapped,
2748  * we'll figure out the rest from the vma inf    2162  * we'll figure out the rest from the vma information.
2749  *                                               2163  *
2750  * NOTE! Some drivers might want to tweak vma    2164  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2751  * whatever write-combining details or simila    2165  * whatever write-combining details or similar.
2752  *                                            << 
2753  * Return: %0 on success, negative error code << 
2754  */                                              2166  */
2755 int vm_iomap_memory(struct vm_area_struct *vm    2167 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2756 {                                                2168 {
2757         unsigned long vm_len, pfn, pages;        2169         unsigned long vm_len, pfn, pages;
2758                                                  2170 
2759         /* Check that the physical memory are    2171         /* Check that the physical memory area passed in looks valid */
2760         if (start + len < start)                 2172         if (start + len < start)
2761                 return -EINVAL;                  2173                 return -EINVAL;
2762         /*                                       2174         /*
2763          * You *really* shouldn't map things     2175          * You *really* shouldn't map things that aren't page-aligned,
2764          * but we've historically allowed it     2176          * but we've historically allowed it because IO memory might
2765          * just have smaller alignment.          2177          * just have smaller alignment.
2766          */                                      2178          */
2767         len += start & ~PAGE_MASK;               2179         len += start & ~PAGE_MASK;
2768         pfn = start >> PAGE_SHIFT;               2180         pfn = start >> PAGE_SHIFT;
2769         pages = (len + ~PAGE_MASK) >> PAGE_SH    2181         pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2770         if (pfn + pages < pfn)                   2182         if (pfn + pages < pfn)
2771                 return -EINVAL;                  2183                 return -EINVAL;
2772                                                  2184 
2773         /* We start the mapping 'vm_pgoff' pa    2185         /* We start the mapping 'vm_pgoff' pages into the area */
2774         if (vma->vm_pgoff > pages)               2186         if (vma->vm_pgoff > pages)
2775                 return -EINVAL;                  2187                 return -EINVAL;
2776         pfn += vma->vm_pgoff;                    2188         pfn += vma->vm_pgoff;
2777         pages -= vma->vm_pgoff;                  2189         pages -= vma->vm_pgoff;
2778                                                  2190 
2779         /* Can we fit all of the mapping? */     2191         /* Can we fit all of the mapping? */
2780         vm_len = vma->vm_end - vma->vm_start;    2192         vm_len = vma->vm_end - vma->vm_start;
2781         if (vm_len >> PAGE_SHIFT > pages)        2193         if (vm_len >> PAGE_SHIFT > pages)
2782                 return -EINVAL;                  2194                 return -EINVAL;
2783                                                  2195 
2784         /* Ok, let it rip */                     2196         /* Ok, let it rip */
2785         return io_remap_pfn_range(vma, vma->v    2197         return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2786 }                                                2198 }
2787 EXPORT_SYMBOL(vm_iomap_memory);                  2199 EXPORT_SYMBOL(vm_iomap_memory);
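/*
 * [Editor's sketch, not part of mm/memory.c] vm_iomap_memory() is the simpler
 * alternative when the driver only knows the physical extent of a BAR or
 * framebuffer: offset handling via vma->vm_pgoff and the size checks are done
 * by the helper itself.  "struct my_bar_dev" and its fields are assumptions
 * for the example; tweaking vm_page_prot first is optional, per the NOTE above.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pgtable.h>

struct my_bar_dev {
	phys_addr_t bar_start;		/* physical start of the region (assumed) */
	unsigned long bar_len;		/* length of the region (assumed) */
};

static int my_dev_mmap_bar(struct file *file, struct vm_area_struct *vma)
{
	struct my_bar_dev *dev = file->private_data;	/* assumed layout */

	/* Pick a caching mode first if desired, e.g. write-combining. */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return vm_iomap_memory(vma, dev->bar_start, dev->bar_len);
}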
2788                                                  2200 
2789 static int apply_to_pte_range(struct mm_struc    2201 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2790                                      unsigned    2202                                      unsigned long addr, unsigned long end,
2791                                      pte_fn_t !! 2203                                      pte_fn_t fn, void *data)
2792                                      pgtbl_mo << 
2793 {                                                2204 {
2794         pte_t *pte, *mapped_pte;              !! 2205         pte_t *pte;
2795         int err = 0;                          !! 2206         int err;
2796         spinlock_t *ptl;                      !! 2207         pgtable_t token;
                                                   >> 2208         spinlock_t *uninitialized_var(ptl);
2797                                                  2209 
2798         if (create) {                         !! 2210         pte = (mm == &init_mm) ?
2799                 mapped_pte = pte = (mm == &in !! 2211                 pte_alloc_kernel(pmd, addr) :
2800                         pte_alloc_kernel_trac !! 2212                 pte_alloc_map_lock(mm, pmd, addr, &ptl);
2801                         pte_alloc_map_lock(mm !! 2213         if (!pte)
2802                 if (!pte)                     !! 2214                 return -ENOMEM;
2803                         return -ENOMEM;       !! 2215 
2804         } else {                              !! 2216         BUG_ON(pmd_huge(*pmd));
2805                 mapped_pte = pte = (mm == &in << 
2806                         pte_offset_kernel(pmd << 
2807                         pte_offset_map_lock(m << 
2808                 if (!pte)                     << 
2809                         return -EINVAL;       << 
2810         }                                     << 
2811                                                  2217 
2812         arch_enter_lazy_mmu_mode();              2218         arch_enter_lazy_mmu_mode();
2813                                                  2219 
2814         if (fn) {                             !! 2220         token = pmd_pgtable(*pmd);
2815                 do {                          !! 2221 
2816                         if (create || !pte_no !! 2222         do {
2817                                 err = fn(pte+ !! 2223                 err = fn(pte++, token, addr, data);
2818                                 if (err)      !! 2224                 if (err)
2819                                         break !! 2225                         break;
2820                         }                     !! 2226         } while (addr += PAGE_SIZE, addr != end);
2821                 } while (addr += PAGE_SIZE, a << 
2822         }                                     << 
2823         *mask |= PGTBL_PTE_MODIFIED;          << 
2824                                                  2227 
2825         arch_leave_lazy_mmu_mode();              2228         arch_leave_lazy_mmu_mode();
2826                                                  2229 
2827         if (mm != &init_mm)                      2230         if (mm != &init_mm)
2828                 pte_unmap_unlock(mapped_pte,  !! 2231                 pte_unmap_unlock(pte-1, ptl);
2829         return err;                              2232         return err;
2830 }                                                2233 }
2831                                                  2234 
2832 static int apply_to_pmd_range(struct mm_struc    2235 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2833                                      unsigned    2236                                      unsigned long addr, unsigned long end,
2834                                      pte_fn_t !! 2237                                      pte_fn_t fn, void *data)
2835                                      pgtbl_mo << 
2836 {                                                2238 {
2837         pmd_t *pmd;                              2239         pmd_t *pmd;
2838         unsigned long next;                      2240         unsigned long next;
2839         int err = 0;                          !! 2241         int err;
2840                                                  2242 
2841         BUG_ON(pud_leaf(*pud));               !! 2243         BUG_ON(pud_huge(*pud));
2842                                                  2244 
2843         if (create) {                         !! 2245         pmd = pmd_alloc(mm, pud, addr);
2844                 pmd = pmd_alloc_track(mm, pud !! 2246         if (!pmd)
2845                 if (!pmd)                     !! 2247                 return -ENOMEM;
2846                         return -ENOMEM;       << 
2847         } else {                              << 
2848                 pmd = pmd_offset(pud, addr);  << 
2849         }                                     << 
2850         do {                                     2248         do {
2851                 next = pmd_addr_end(addr, end    2249                 next = pmd_addr_end(addr, end);
2852                 if (pmd_none(*pmd) && !create !! 2250                 err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
2853                         continue;             << 
2854                 if (WARN_ON_ONCE(pmd_leaf(*pm << 
2855                         return -EINVAL;       << 
2856                 if (!pmd_none(*pmd) && WARN_O << 
2857                         if (!create)          << 
2858                                 continue;     << 
2859                         pmd_clear_bad(pmd);   << 
2860                 }                             << 
2861                 err = apply_to_pte_range(mm,  << 
2862                                          fn,  << 
2863                 if (err)                         2251                 if (err)
2864                         break;                   2252                         break;
2865         } while (pmd++, addr = next, addr !=     2253         } while (pmd++, addr = next, addr != end);
2866                                               << 
2867         return err;                              2254         return err;
2868 }                                                2255 }
2869                                                  2256 
2870 static int apply_to_pud_range(struct mm_struc    2257 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2871                                      unsigned    2258                                      unsigned long addr, unsigned long end,
2872                                      pte_fn_t !! 2259                                      pte_fn_t fn, void *data)
2873                                      pgtbl_mo << 
2874 {                                                2260 {
2875         pud_t *pud;                              2261         pud_t *pud;
2876         unsigned long next;                      2262         unsigned long next;
2877         int err = 0;                          !! 2263         int err;
2878                                                  2264 
2879         if (create) {                         !! 2265         pud = pud_alloc(mm, p4d, addr);
2880                 pud = pud_alloc_track(mm, p4d !! 2266         if (!pud)
2881                 if (!pud)                     !! 2267                 return -ENOMEM;
2882                         return -ENOMEM;       << 
2883         } else {                              << 
2884                 pud = pud_offset(p4d, addr);  << 
2885         }                                     << 
2886         do {                                     2268         do {
2887                 next = pud_addr_end(addr, end    2269                 next = pud_addr_end(addr, end);
2888                 if (pud_none(*pud) && !create !! 2270                 err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
2889                         continue;             << 
2890                 if (WARN_ON_ONCE(pud_leaf(*pu << 
2891                         return -EINVAL;       << 
2892                 if (!pud_none(*pud) && WARN_O << 
2893                         if (!create)          << 
2894                                 continue;     << 
2895                         pud_clear_bad(pud);   << 
2896                 }                             << 
2897                 err = apply_to_pmd_range(mm,  << 
2898                                          fn,  << 
2899                 if (err)                         2271                 if (err)
2900                         break;                   2272                         break;
2901         } while (pud++, addr = next, addr !=     2273         } while (pud++, addr = next, addr != end);
2902                                               << 
2903         return err;                              2274         return err;
2904 }                                                2275 }
2905                                                  2276 
2906 static int apply_to_p4d_range(struct mm_struc    2277 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2907                                      unsigned    2278                                      unsigned long addr, unsigned long end,
2908                                      pte_fn_t !! 2279                                      pte_fn_t fn, void *data)
2909                                      pgtbl_mo << 
2910 {                                                2280 {
2911         p4d_t *p4d;                              2281         p4d_t *p4d;
2912         unsigned long next;                      2282         unsigned long next;
2913         int err = 0;                          !! 2283         int err;
2914                                                  2284 
2915         if (create) {                         !! 2285         p4d = p4d_alloc(mm, pgd, addr);
2916                 p4d = p4d_alloc_track(mm, pgd !! 2286         if (!p4d)
2917                 if (!p4d)                     !! 2287                 return -ENOMEM;
2918                         return -ENOMEM;       << 
2919         } else {                              << 
2920                 p4d = p4d_offset(pgd, addr);  << 
2921         }                                     << 
2922         do {                                     2288         do {
2923                 next = p4d_addr_end(addr, end    2289                 next = p4d_addr_end(addr, end);
2924                 if (p4d_none(*p4d) && !create !! 2290                 err = apply_to_pud_range(mm, p4d, addr, next, fn, data);
2925                         continue;             << 
2926                 if (WARN_ON_ONCE(p4d_leaf(*p4 << 
2927                         return -EINVAL;       << 
2928                 if (!p4d_none(*p4d) && WARN_O << 
2929                         if (!create)          << 
2930                                 continue;     << 
2931                         p4d_clear_bad(p4d);   << 
2932                 }                             << 
2933                 err = apply_to_pud_range(mm,  << 
2934                                          fn,  << 
2935                 if (err)                         2291                 if (err)
2936                         break;                   2292                         break;
2937         } while (p4d++, addr = next, addr !=     2293         } while (p4d++, addr = next, addr != end);
2938                                               << 
2939         return err;                              2294         return err;
2940 }                                                2295 }
2941                                                  2296 
2942 static int __apply_to_page_range(struct mm_st !! 2297 /*
2943                                  unsigned lon !! 2298  * Scan a region of virtual memory, filling in page tables as necessary
2944                                  void *data,  !! 2299  * and calling a provided function on each leaf page table.
                                                   >> 2300  */
                                                   >> 2301 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
                                                   >> 2302                         unsigned long size, pte_fn_t fn, void *data)
2945 {                                                2303 {
2946         pgd_t *pgd;                              2304         pgd_t *pgd;
2947         unsigned long start = addr, next;     !! 2305         unsigned long next;
2948         unsigned long end = addr + size;         2306         unsigned long end = addr + size;
2949         pgtbl_mod_mask mask = 0;              !! 2307         int err;
2950         int err = 0;                          << 
2951                                                  2308 
2952         if (WARN_ON(addr >= end))                2309         if (WARN_ON(addr >= end))
2953                 return -EINVAL;                  2310                 return -EINVAL;
2954                                                  2311 
2955         pgd = pgd_offset(mm, addr);              2312         pgd = pgd_offset(mm, addr);
2956         do {                                     2313         do {
2957                 next = pgd_addr_end(addr, end    2314                 next = pgd_addr_end(addr, end);
2958                 if (pgd_none(*pgd) && !create !! 2315                 err = apply_to_p4d_range(mm, pgd, addr, next, fn, data);
2959                         continue;             << 
2960                 if (WARN_ON_ONCE(pgd_leaf(*pg << 
2961                         return -EINVAL;       << 
2962                 if (!pgd_none(*pgd) && WARN_O << 
2963                         if (!create)          << 
2964                                 continue;     << 
2965                         pgd_clear_bad(pgd);   << 
2966                 }                             << 
2967                 err = apply_to_p4d_range(mm,  << 
2968                                          fn,  << 
2969                 if (err)                         2316                 if (err)
2970                         break;                   2317                         break;
2971         } while (pgd++, addr = next, addr !=     2318         } while (pgd++, addr = next, addr != end);
2972                                                  2319 
2973         if (mask & ARCH_PAGE_TABLE_SYNC_MASK) << 
2974                 arch_sync_kernel_mappings(sta << 
2975                                               << 
2976         return err;                              2320         return err;
2977 }                                                2321 }
2978                                               << 
2979 /*                                            << 
2980  * Scan a region of virtual memory, filling i << 
2981  * and calling a provided function on each le << 
2982  */                                           << 
2983 int apply_to_page_range(struct mm_struct *mm, << 
2984                         unsigned long size, p << 
2985 {                                             << 
2986         return __apply_to_page_range(mm, addr << 
2987 }                                             << 
2988 EXPORT_SYMBOL_GPL(apply_to_page_range);          2322 EXPORT_SYMBOL_GPL(apply_to_page_range);
2989                                                  2323 
2990 /*                                               2324 /*
2991  * Scan a region of virtual memory, calling a << 
2992  * each leaf page table where it exists.      << 
2993  *                                            << 
2994  * Unlike apply_to_page_range, this does _not << 
2995  * where they are absent.                     << 
2996  */                                           << 
2997 int apply_to_existing_page_range(struct mm_st << 
2998                                  unsigned lon << 
2999 {                                             << 
3000         return __apply_to_page_range(mm, addr << 
3001 }                                             << 
3002 EXPORT_SYMBOL_GPL(apply_to_existing_page_rang << 
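/*
 * [Editor's sketch, not part of mm/memory.c] Using apply_to_page_range() to
 * walk (and, with the "create" behaviour, allocate) kernel page tables for a
 * region and run a callback on every leaf PTE.  The pte_fn_t signature shown
 * matches recent kernels (pte, addr, data); older kernels, as in the
 * right-hand column above, also passed a pgtable_t token.  The write-protect
 * logic is purely demonstrative.
 */
#include <linux/mm.h>
#include <linux/pgtable.h>

static int my_make_ro_pte(pte_t *pte, unsigned long addr, void *data)
{
	/* Clear the write bit on this PTE of the kernel mapping. */
	set_pte_at(&init_mm, addr, pte, pte_wrprotect(ptep_get(pte)));
	return 0;
}

static int my_make_region_ro(unsigned long addr, unsigned long size)
{
	/*
	 * apply_to_existing_page_range() could be used instead when missing
	 * page tables should be skipped rather than allocated.
	 */
	return apply_to_page_range(&init_mm, addr, size, my_make_ro_pte, NULL);
}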
3003                                               << 
3004 /*                                            << 
3005  * handle_pte_fault chooses page fault handle    2325  * handle_pte_fault chooses page fault handler according to an entry which was
3006  * read non-atomically.  Before making any co    2326  * read non-atomically.  Before making any commitment, on those architectures
3007  * or configurations (e.g. i386 with PAE) whi    2327  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
3008  * parts, do_swap_page must check under lock     2328  * parts, do_swap_page must check under lock before unmapping the pte and
3009  * proceeding (but do_wp_page is only called     2329  * proceeding (but do_wp_page is only called after already making such a check;
3010  * and do_anonymous_page can safely check lat    2330  * and do_anonymous_page can safely check later on).
3011  */                                              2331  */
3012 static inline int pte_unmap_same(struct vm_fa !! 2332 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
                                                   >> 2333                                 pte_t *page_table, pte_t orig_pte)
3013 {                                                2334 {
3014         int same = 1;                            2335         int same = 1;
3015 #if defined(CONFIG_SMP) || defined(CONFIG_PRE !! 2336 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
3016         if (sizeof(pte_t) > sizeof(unsigned l    2337         if (sizeof(pte_t) > sizeof(unsigned long)) {
3017                 spin_lock(vmf->ptl);          !! 2338                 spinlock_t *ptl = pte_lockptr(mm, pmd);
3018                 same = pte_same(ptep_get(vmf- !! 2339                 spin_lock(ptl);
3019                 spin_unlock(vmf->ptl);        !! 2340                 same = pte_same(*page_table, orig_pte);
                                                   >> 2341                 spin_unlock(ptl);
3020         }                                        2342         }
3021 #endif                                           2343 #endif
3022         pte_unmap(vmf->pte);                  !! 2344         pte_unmap(page_table);
3023         vmf->pte = NULL;                      << 
3024         return same;                             2345         return same;
3025 }                                                2346 }
3026                                                  2347 
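/*
 * Worked example of the race pte_unmap_same() guards against (illustrative,
 * not taken from this file): on i386 with PAE, pte_t is 64 bits wide but the
 * PTE value captured before reaching the fault handlers is read
 * non-atomically as two 32-bit loads.  A concurrent PTE update between those
 * loads can combine the PFN half of the old entry with the flags half of the
 * new one, so the value must be re-checked under the page-table lock before
 * it is acted upon.
 */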
3027 /*                                            !! 2348 static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
3028  * Return:                                    << 
3029  *      0:              copied succeeded      << 
3030  *      -EHWPOISON:     copy failed due to hwpoison in source page << 
3031  *      -EAGAIN:        copied failed (some other reason) << 
3032  */                                           << 
3033 static inline int __wp_page_copy_user(struct  << 
3034                                       struct  << 
3035 {                                                2349 {
3036         int ret;                              !! 2350         debug_dma_assert_idle(src);
3037         void *kaddr;                          << 
3038         void __user *uaddr;                   << 
3039         struct vm_area_struct *vma = vmf->vma << 
3040         struct mm_struct *mm = vma->vm_mm;    << 
3041         unsigned long addr = vmf->address;    << 
3042                                               << 
3043         if (likely(src)) {                    << 
3044                 if (copy_mc_user_highpage(dst << 
3045                         return -EHWPOISON;    << 
3046                 return 0;                     << 
3047         }                                     << 
3048                                                  2351 
3049         /*                                       2352         /*
3050          * If the source page was a PFN mappi    2353          * If the source page was a PFN mapping, we don't have
3051          * a "struct page" for it. We do a be    2354          * a "struct page" for it. We do a best-effort copy by
3052          * just copying from the original use    2355          * just copying from the original user address. If that
3053          * fails, we just zero-fill it. Live     2356          * fails, we just zero-fill it. Live with it.
3054          */                                      2357          */
3055         kaddr = kmap_local_page(dst);         !! 2358         if (unlikely(!src)) {
3056         pagefault_disable();                  !! 2359                 void *kaddr = kmap_atomic(dst);
3057         uaddr = (void __user *)(addr & PAGE_M !! 2360                 void __user *uaddr = (void __user *)(va & PAGE_MASK);
3058                                               << 
3059         /*                                    << 
3060          * On architectures with software "accessed" bits, we would << 
3061          * take a double page fault, so mark it accessed here. << 
3062          */                                   << 
3063         vmf->pte = NULL;                      << 
3064         if (!arch_has_hw_pte_young() && !pte_ << 
3065                 pte_t entry;                  << 
3066                                               << 
3067                 vmf->pte = pte_offset_map_loc << 
3068                 if (unlikely(!vmf->pte || !pt << 
3069                         /*                    << 
3070                          * Other thread has already handled the fault << 
3071                          * and update local tlb only << 
3072                          */                   << 
3073                         if (vmf->pte)         << 
3074                                 update_mmu_tl << 
3075                         ret = -EAGAIN;        << 
3076                         goto pte_unlock;      << 
3077                 }                             << 
3078                                               << 
3079                 entry = pte_mkyoung(vmf->orig << 
3080                 if (ptep_set_access_flags(vma << 
3081                         update_mmu_cache_rang << 
3082         }                                     << 
3083                                               << 
3084         /*                                    << 
3085          * This really shouldn't fail, because the page is there << 
3086          * in the page tables. But it might just be unreadable, << 
3087          * in which case we just give up and fill the result with << 
3088          * zeroes.                            << 
3089          */                                   << 
3090         if (__copy_from_user_inatomic(kaddr,  << 
3091                 if (vmf->pte)                 << 
3092                         goto warn;            << 
3093                                               << 
3094                 /* Re-validate under PTL if the page is still mapped */ << 
3095                 vmf->pte = pte_offset_map_loc << 
3096                 if (unlikely(!vmf->pte || !pt << 
3097                         /* The PTE changed under us, update local tlb */ << 
3098                         if (vmf->pte)         << 
3099                                 update_mmu_tl << 
3100                         ret = -EAGAIN;        << 
3101                         goto pte_unlock;      << 
3102                 }                             << 
3103                                                  2361 
3104                 /*                               2362                 /*
3105                  * The same page can be mappe !! 2363                  * This really shouldn't fail, because the page is there
3106                  * Try to copy again under PT !! 2364                  * in the page tables. But it might just be unreadable,
                                                   >> 2365                  * in which case we just give up and fill the result with
                                                   >> 2366                  * zeroes.
3107                  */                              2367                  */
3108                 if (__copy_from_user_inatomic !! 2368                 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
3109                         /*                    << 
3110                          * Give a warn in case there can be some obscure << 
3111                          * use-case           << 
3112                          */                   << 
3113 warn:                                         << 
3114                         WARN_ON_ONCE(1);      << 
3115                         clear_page(kaddr);       2369                         clear_page(kaddr);
3116                 }                             !! 2370                 kunmap_atomic(kaddr);
3117         }                                     !! 2371                 flush_dcache_page(dst);
3118                                               !! 2372         } else
3119         ret = 0;                              !! 2373                 copy_user_highpage(dst, src, va, vma);
3120                                               << 
3121 pte_unlock:                                   << 
3122         if (vmf->pte)                         << 
3123                 pte_unmap_unlock(vmf->pte, vm << 
3124         pagefault_enable();                   << 
3125         kunmap_local(kaddr);                  << 
3126         flush_dcache_page(dst);               << 
3127                                               << 
3128         return ret;                           << 
3129 }                                                2374 }
3130                                                  2375 
3131 static gfp_t __get_fault_gfp_mask(struct vm_a    2376 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
3132 {                                                2377 {
3133         struct file *vm_file = vma->vm_file;     2378         struct file *vm_file = vma->vm_file;
3134                                                  2379 
3135         if (vm_file)                             2380         if (vm_file)
3136                 return mapping_gfp_mask(vm_fi    2381                 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
3137                                                  2382 
3138         /*                                       2383         /*
3139          * Special mappings (e.g. VDSO) do no    2384          * Special mappings (e.g. VDSO) do not have any file so fake
3140          * a default GFP_KERNEL for them.        2385          * a default GFP_KERNEL for them.
3141          */                                      2386          */
3142         return GFP_KERNEL;                       2387         return GFP_KERNEL;
3143 }                                                2388 }
3144                                                  2389 
3145 /*                                               2390 /*
3146  * Notify the address space that the page is     2391  * Notify the address space that the page is about to become writable so that
3147  * it can prohibit this or wait for the page     2392  * it can prohibit this or wait for the page to get into an appropriate state.
3148  *                                               2393  *
3149  * We do this without the lock held, so that     2394  * We do this without the lock held, so that it can sleep if it needs to.
3150  */                                              2395  */
3151 static vm_fault_t do_page_mkwrite(struct vm_f !! 2396 static int do_page_mkwrite(struct vm_fault *vmf)
3152 {                                                2397 {
3153         vm_fault_t ret;                       !! 2398         int ret;
                                                   >> 2399         struct page *page = vmf->page;
3154         unsigned int old_flags = vmf->flags;     2400         unsigned int old_flags = vmf->flags;
3155                                                  2401 
3156         vmf->flags = FAULT_FLAG_WRITE|FAULT_F    2402         vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
3157                                                  2403 
3158         if (vmf->vma->vm_file &&              << 
3159             IS_SWAPFILE(vmf->vma->vm_file->f_ << 
3160                 return VM_FAULT_SIGBUS;       << 
3161                                               << 
3162         ret = vmf->vma->vm_ops->page_mkwrite(    2404         ret = vmf->vma->vm_ops->page_mkwrite(vmf);
3163         /* Restore original flags so that cal    2405         /* Restore original flags so that caller is not surprised */
3164         vmf->flags = old_flags;                  2406         vmf->flags = old_flags;
3165         if (unlikely(ret & (VM_FAULT_ERROR |     2407         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
3166                 return ret;                      2408                 return ret;
3167         if (unlikely(!(ret & VM_FAULT_LOCKED)    2409         if (unlikely(!(ret & VM_FAULT_LOCKED))) {
3168                 folio_lock(folio);            !! 2410                 lock_page(page);
3169                 if (!folio->mapping) {        !! 2411                 if (!page->mapping) {
3170                         folio_unlock(folio);  !! 2412                         unlock_page(page);
3171                         return 0; /* retry */    2413                         return 0; /* retry */
3172                 }                                2414                 }
3173                 ret |= VM_FAULT_LOCKED;          2415                 ret |= VM_FAULT_LOCKED;
3174         } else                                   2416         } else
3175                 VM_BUG_ON_FOLIO(!folio_test_l !! 2417                 VM_BUG_ON_PAGE(!PageLocked(page), page);
3176         return ret;                              2418         return ret;
3177 }                                                2419 }
3178                                                  2420 
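/*
 * Illustrative sketch (not part of memory.c): the general shape of a
 * filesystem ->page_mkwrite() handler that satisfies the contract used by
 * do_page_mkwrite() above, i.e. it either returns with the folio locked and
 * VM_FAULT_LOCKED set, or signals that the fault should be retried.  All
 * names here are hypothetical.
 */
static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);

	folio_lock(folio);
	/* Raced with truncate: let the caller retry the fault. */
	if (folio->mapping != inode->i_mapping) {
		folio_unlock(folio);
		return VM_FAULT_NOPAGE;
	}
	/*
	 * Wait for any writeback in flight; a real filesystem would also
	 * dirty the folio and reserve space for the write here.
	 */
	folio_wait_writeback(folio);
	return VM_FAULT_LOCKED;
}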
3179 /*                                               2421 /*
3180  * Handle dirtying of a page in shared file m    2422  * Handle dirtying of a page in shared file mapping on a write fault.
3181  *                                               2423  *
3182  * The function expects the page to be locked    2424  * The function expects the page to be locked and unlocks it.
3183  */                                              2425  */
3184 static vm_fault_t fault_dirty_shared_page(str !! 2426 static void fault_dirty_shared_page(struct vm_area_struct *vma,
                                                   >> 2427                                     struct page *page)
3185 {                                                2428 {
3186         struct vm_area_struct *vma = vmf->vma << 
3187         struct address_space *mapping;           2429         struct address_space *mapping;
3188         struct folio *folio = page_folio(vmf- << 
3189         bool dirtied;                            2430         bool dirtied;
3190         bool page_mkwrite = vma->vm_ops && vm    2431         bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
3191                                                  2432 
3192         dirtied = folio_mark_dirty(folio);    !! 2433         dirtied = set_page_dirty(page);
3193         VM_BUG_ON_FOLIO(folio_test_anon(folio !! 2434         VM_BUG_ON_PAGE(PageAnon(page), page);
3194         /*                                       2435         /*
3195          * Take a local copy of the address_s !! 2436          * Take a local copy of the address_space - page.mapping may be zeroed
3196          * by truncate after folio_unlock().  !! 2437          * by truncate after unlock_page().   The address_space itself remains
3197          * pinned by vma->vm_file's reference !! 2438          * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
3198          * release semantics to prevent the c    2439          * release semantics to prevent the compiler from undoing this copying.
3199          */                                      2440          */
3200         mapping = folio_raw_mapping(folio);   !! 2441         mapping = page_rmapping(page);
3201         folio_unlock(folio);                  !! 2442         unlock_page(page);
3202                                                  2443 
3203         if (!page_mkwrite)                    << 
3204                 file_update_time(vma->vm_file << 
3205                                               << 
3206         /*                                    << 
3207          * Throttle page dirtying rate down to writeback speed. << 
3208          *                                    << 
3209          * mapping may be NULL here because some device drivers do not << 
3210          * set page.mapping but still dirty their pages << 
3211          *                                    << 
3212          * Drop the mmap_lock before waiting on IO, if we can. The file << 
3213          * is pinning the mapping, as per above. << 
3214          */                                   << 
3215         if ((dirtied || page_mkwrite) && mapp    2444         if ((dirtied || page_mkwrite) && mapping) {
3216                 struct file *fpin;            !! 2445                 /*
3217                                               !! 2446                  * Some device drivers do not set page.mapping
3218                 fpin = maybe_unlock_mmap_for_ !! 2447                  * but still dirty their pages
                                                   >> 2448                  */
3219                 balance_dirty_pages_ratelimit    2449                 balance_dirty_pages_ratelimited(mapping);
3220                 if (fpin) {                   << 
3221                         fput(fpin);           << 
3222                         return VM_FAULT_COMPL << 
3223                 }                             << 
3224         }                                        2450         }
3225                                                  2451 
3226         return 0;                             !! 2452         if (!page_mkwrite)
                                                   >> 2453                 file_update_time(vma->vm_file);
3227 }                                                2454 }
3228                                                  2455 
3229 /*                                               2456 /*
3230  * Handle write page faults for pages that ca    2457  * Handle write page faults for pages that can be reused in the current vma
3231  *                                               2458  *
3232  * This can happen either due to the mapping     2459  * This can happen either due to the mapping being with the VM_SHARED flag,
3233  * or due to us being the last reference stan    2460  * or due to us being the last reference standing to the page. In either
3234  * case, all we need to do here is to mark th    2461  * case, all we need to do here is to mark the page as writable and update
3235  * any related book-keeping.                     2462  * any related book-keeping.
3236  */                                              2463  */
3237 static inline void wp_page_reuse(struct vm_fa !! 2464 static inline void wp_page_reuse(struct vm_fault *vmf)
3238         __releases(vmf->ptl)                     2465         __releases(vmf->ptl)
3239 {                                                2466 {
3240         struct vm_area_struct *vma = vmf->vma    2467         struct vm_area_struct *vma = vmf->vma;
                                                   >> 2468         struct page *page = vmf->page;
3241         pte_t entry;                             2469         pte_t entry;
3242                                               !! 2470         /*
3243         VM_BUG_ON(!(vmf->flags & FAULT_FLAG_W !! 2471          * Clear the pages cpupid information as the existing
3244         VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->o !! 2472          * information potentially belongs to a now completely
3245                                               !! 2473          * unrelated process.
3246         if (folio) {                          !! 2474          */
3247                 VM_BUG_ON(folio_test_anon(fol !! 2475         if (page)
3248                           !PageAnonExclusive( !! 2476                 page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
3249                 /*                            << 
3250                  * Clear the folio's cpupid information as the existing << 
3251                  * information potentially belongs to a now completely << 
3252                  * unrelated process.         << 
3253                  */                           << 
3254                 folio_xchg_last_cpupid(folio, << 
3255         }                                     << 
3256                                                  2477 
3257         flush_cache_page(vma, vmf->address, p    2478         flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3258         entry = pte_mkyoung(vmf->orig_pte);      2479         entry = pte_mkyoung(vmf->orig_pte);
3259         entry = maybe_mkwrite(pte_mkdirty(ent    2480         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3260         if (ptep_set_access_flags(vma, vmf->a    2481         if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3261                 update_mmu_cache_range(vmf, v !! 2482                 update_mmu_cache(vma, vmf->address, vmf->pte);
3262         pte_unmap_unlock(vmf->pte, vmf->ptl);    2483         pte_unmap_unlock(vmf->pte, vmf->ptl);
3263         count_vm_event(PGREUSE);              << 
3264 }                                                2484 }
3265                                                  2485 
3266 /*                                               2486 /*
3267  * We could add a bitflag somewhere, but for now, we know that all << 
3268  * vm_ops that have a ->map_pages have been audited and do not need << 
3269  * the mmap_lock to be held.                  << 
3270  */                                           << 
3271 static inline vm_fault_t vmf_can_call_fault(c << 
3272 {                                             << 
3273         struct vm_area_struct *vma = vmf->vma << 
3274                                               << 
3275         if (vma->vm_ops->map_pages || !(vmf-> << 
3276                 return 0;                     << 
3277         vma_end_read(vma);                    << 
3278         return VM_FAULT_RETRY;                << 
3279 }                                             << 
3280                                               << 
3281 /**                                           << 
3282  * __vmf_anon_prepare - Prepare to handle an anonymous fault. << 
3283  * @vmf: The vm_fault descriptor passed from the fault handler. << 
3284  *                                            << 
3285  * When preparing to insert an anonymous page into a VMA from a << 
3286  * fault handler, call this function rather than anon_vma_prepare(). << 
3287  * If this vma does not already have an associated anon_vma and we are << 
3288  * only protected by the per-VMA lock, the caller must retry with the << 
3289  * mmap_lock held.  __anon_vma_prepare() will look at adjacent VMAs to << 
3290  * determine if this VMA can share its anon_vma, and that's not safe to << 
3291  * do with only the per-VMA lock held for this VMA. << 
3292  *                                               2488  *
3293  * Return: 0 if fault handling can proceed.   !! 2489  * Called with mmap_sem locked and the old page referenced, but
3294  * returned to the caller.                    << 
3295  */                                           << 
3296 vm_fault_t __vmf_anon_prepare(struct vm_fault << 
3297 {                                             << 
3298         struct vm_area_struct *vma = vmf->vma << 
3299         vm_fault_t ret = 0;                   << 
3300                                               << 
3301         if (likely(vma->anon_vma))            << 
3302                 return 0;                     << 
3303         if (vmf->flags & FAULT_FLAG_VMA_LOCK) << 
3304                 if (!mmap_read_trylock(vma->v << 
3305                         return VM_FAULT_RETRY << 
3306         }                                     << 
3307         if (__anon_vma_prepare(vma))          << 
3308                 ret = VM_FAULT_OOM;           << 
3309         if (vmf->flags & FAULT_FLAG_VMA_LOCK) << 
3310                 mmap_read_unlock(vma->vm_mm); << 
3311         return ret;                           << 
3312 }                                             << 
3313                                               << 
3314 /*                                            << 
3315  * Handle the case of a page which we actually need to copy to a new page, << 
3316  * either due to COW or unsharing.            << 
3317  *                                            << 
3318  * Called with mmap_lock locked and the old p << 
3319  * without the ptl held.                         2490  * without the ptl held.
3320  *                                               2491  *
3321  * High level logic flow:                        2492  * High level logic flow:
3322  *                                               2493  *
3323  * - Allocate a page, copy the content of the    2494  * - Allocate a page, copy the content of the old page to the new one.
3324  * - Handle book keeping and accounting - cgr    2495  * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
3325  * - Take the PTL. If the pte changed, bail o    2496  * - Take the PTL. If the pte changed, bail out and release the allocated page
3326  * - If the pte is still the way we remember     2497  * - If the pte is still the way we remember it, update the page table and all
3327  *   relevant references. This includes dropp    2498  *   relevant references. This includes dropping the reference the page-table
3328  *   held to the old page, as well as updatin    2499  *   held to the old page, as well as updating the rmap.
3329  * - In any case, unlock the PTL and drop the    2500  * - In any case, unlock the PTL and drop the reference we took to the old page.
3330  */                                              2501  */
3331 static vm_fault_t wp_page_copy(struct vm_faul !! 2502 static int wp_page_copy(struct vm_fault *vmf)
3332 {                                                2503 {
3333         const bool unshare = vmf->flags & FAU << 
3334         struct vm_area_struct *vma = vmf->vma    2504         struct vm_area_struct *vma = vmf->vma;
3335         struct mm_struct *mm = vma->vm_mm;       2505         struct mm_struct *mm = vma->vm_mm;
3336         struct folio *old_folio = NULL;       !! 2506         struct page *old_page = vmf->page;
3337         struct folio *new_folio = NULL;       !! 2507         struct page *new_page = NULL;
3338         pte_t entry;                             2508         pte_t entry;
3339         int page_copied = 0;                     2509         int page_copied = 0;
3340         struct mmu_notifier_range range;      !! 2510         const unsigned long mmun_start = vmf->address & PAGE_MASK;
3341         vm_fault_t ret;                       !! 2511         const unsigned long mmun_end = mmun_start + PAGE_SIZE;
3342         bool pfn_is_zero;                     !! 2512         struct mem_cgroup *memcg;
3343                                               << 
3344         delayacct_wpcopy_start();             << 
3345                                               << 
3346         if (vmf->page)                        << 
3347                 old_folio = page_folio(vmf->p << 
3348         ret = vmf_anon_prepare(vmf);          << 
3349         if (unlikely(ret))                    << 
3350                 goto out;                     << 
3351                                                  2513 
3352         pfn_is_zero = is_zero_pfn(pte_pfn(vmf !! 2514         if (unlikely(anon_vma_prepare(vma)))
3353         new_folio = folio_prealloc(mm, vma, v << 
3354         if (!new_folio)                       << 
3355                 goto oom;                        2515                 goto oom;
3356                                                  2516 
3357         if (!pfn_is_zero) {                   !! 2517         if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
3358                 int err;                      !! 2518                 new_page = alloc_zeroed_user_highpage_movable(vma,
3359                                               !! 2519                                                               vmf->address);
3360                 err = __wp_page_copy_user(&ne !! 2520                 if (!new_page)
3361                 if (err) {                    !! 2521                         goto oom;
3362                         /*                    !! 2522         } else {
3363                          * COW failed, if the !! 2523                 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
3364                          * it's fine. If not, !! 2524                                 vmf->address);
3365                          * the same address a !! 2525                 if (!new_page)
3366                          * from the second at !! 2526                         goto oom;
3367                          * The -EHWPOISON cas !! 2527                 cow_user_page(new_page, old_page, vmf->address, vma);
3368                          */                   << 
3369                         folio_put(new_folio); << 
3370                         if (old_folio)        << 
3371                                 folio_put(old << 
3372                                               << 
3373                         delayacct_wpcopy_end( << 
3374                         return err == -EHWPOI << 
3375                 }                             << 
3376                 kmsan_copy_page_meta(&new_fol << 
3377         }                                        2528         }
3378                                                  2529 
3379         __folio_mark_uptodate(new_folio);     !! 2530         if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))
                                                   >> 2531                 goto oom_free_new;
                                                   >> 2532 
                                                   >> 2533         __SetPageUptodate(new_page);
3380                                                  2534 
3381         mmu_notifier_range_init(&range, MMU_N !! 2535         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3382                                 vmf->address  << 
3383                                 (vmf->address << 
3384         mmu_notifier_invalidate_range_start(& << 
3385                                                  2536 
3386         /*                                       2537         /*
3387          * Re-check the pte - we dropped the     2538          * Re-check the pte - we dropped the lock
3388          */                                      2539          */
3389         vmf->pte = pte_offset_map_lock(mm, vm    2540         vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3390         if (likely(vmf->pte && pte_same(ptep_ !! 2541         if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
3391                 if (old_folio) {              !! 2542                 if (old_page) {
3392                         if (!folio_test_anon( !! 2543                         if (!PageAnon(old_page)) {
3393                                 dec_mm_counte !! 2544                                 dec_mm_counter_fast(mm,
3394                                 inc_mm_counte !! 2545                                                 mm_counter_file(old_page));
                                                   >> 2546                                 inc_mm_counter_fast(mm, MM_ANONPAGES);
3395                         }                        2547                         }
3396                 } else {                         2548                 } else {
3397                         ksm_might_unmap_zero_ !! 2549                         inc_mm_counter_fast(mm, MM_ANONPAGES);
3398                         inc_mm_counter(mm, MM << 
3399                 }                                2550                 }
3400                 flush_cache_page(vma, vmf->ad    2551                 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3401                 entry = mk_pte(&new_folio->pa !! 2552                 entry = mk_pte(new_page, vma->vm_page_prot);
3402                 entry = pte_sw_mkyoung(entry) !! 2553                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3403                 if (unlikely(unshare)) {      << 
3404                         if (pte_soft_dirty(vm << 
3405                                 entry = pte_m << 
3406                         if (pte_uffd_wp(vmf-> << 
3407                                 entry = pte_m << 
3408                 } else {                      << 
3409                         entry = maybe_mkwrite << 
3410                 }                             << 
3411                                               << 
3412                 /*                               2554                 /*
3413                  * Clear the pte entry and fl    2555                  * Clear the pte entry and flush it first, before updating the
3414                  * pte with the new entry, to !! 2556                  * pte with the new entry. This will avoid a race condition
3415                  * sync. This code used to se !! 2557                  * seen in the presence of one thread doing SMC and another
3416                  * that left a window where t !! 2558                  * thread doing COW.
3417                  * some TLBs while the old PT !! 2559                  */
                                                   >> 2560                 ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
                                                   >> 2561                 page_add_new_anon_rmap(new_page, vma, vmf->address, false);
                                                   >> 2562                 mem_cgroup_commit_charge(new_page, memcg, false, false);
                                                   >> 2563                 lru_cache_add_active_or_unevictable(new_page, vma);
                                                   >> 2564                 /*
                                                   >> 2565                  * We call the notify macro here because, when using secondary
                                                   >> 2566                  * mmu page tables (such as kvm shadow page tables), we want the
                                                   >> 2567                  * new page to be mapped directly into the secondary page table.
3418                  */                              2568                  */
3419                 ptep_clear_flush(vma, vmf->ad !! 2569                 set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
3420                 folio_add_new_anon_rmap(new_f !! 2570                 update_mmu_cache(vma, vmf->address, vmf->pte);
3421                 folio_add_lru_vma(new_folio,  !! 2571                 if (old_page) {
3422                 BUG_ON(unshare && pte_write(e << 
3423                 set_pte_at(mm, vmf->address,  << 
3424                 update_mmu_cache_range(vmf, v << 
3425                 if (old_folio) {              << 
3426                         /*                       2572                         /*
3427                          * Only after switchi    2573                          * Only after switching the pte to the new page may
3428                          * we remove the mapc    2574                          * we remove the mapcount here. Otherwise another
3429                          * process may come a    2575                          * process may come and find the rmap count decremented
3430                          * before the pte is     2576                          * before the pte is switched to the new page, and
3431                          * "reuse" the old pa    2577                          * "reuse" the old page writing into it while our pte
3432                          * here still points     2578                          * here still points into it and can be read by other
3433                          * threads.              2579                          * threads.
3434                          *                       2580                          *
3435                          * The critical issue    2581                          * The critical issue is to order this
3436                          * folio_remove_rmap_ !! 2582                          * page_remove_rmap with the ptp_clear_flush above.
3437                          * above. Those store !! 2583                          * Those stores are ordered by (if nothing else,)
3438                          * the barrier presen    2584                          * the barrier present in the atomic_add_negative
3439                          * in folio_remove_rm !! 2585                          * in page_remove_rmap.
3440                          *                       2586                          *
3441                          * Then the TLB flush    2587                          * Then the TLB flush in ptep_clear_flush ensures that
3442                          * no process can acc    2588                          * no process can access the old page before the
3443                          * decremented mapcou    2589                          * decremented mapcount is visible. And the old page
3444                          * cannot be reused u    2590                          * cannot be reused until after the decremented
3445                          * mapcount is visibl    2591                          * mapcount is visible. So transitively, TLBs to
3446                          * old page will be f    2592                          * old page will be flushed before it can be reused.
3447                          */                      2593                          */
3448                         folio_remove_rmap_pte !! 2594                         page_remove_rmap(old_page, false);
3449                 }                                2595                 }
3450                                                  2596 
3451                 /* Free the old page.. */        2597                 /* Free the old page.. */
3452                 new_folio = old_folio;        !! 2598                 new_page = old_page;
3453                 page_copied = 1;                 2599                 page_copied = 1;
3454                 pte_unmap_unlock(vmf->pte, vm !! 2600         } else {
3455         } else if (vmf->pte) {                !! 2601                 mem_cgroup_cancel_charge(new_page, memcg, false);
3456                 update_mmu_tlb(vma, vmf->addr << 
3457                 pte_unmap_unlock(vmf->pte, vm << 
3458         }                                        2602         }
3459                                                  2603 
3460         mmu_notifier_invalidate_range_end(&ra !! 2604         if (new_page)
3461                                               !! 2605                 put_page(new_page);
3462         if (new_folio)                        << 
3463                 folio_put(new_folio);         << 
3464         if (old_folio) {                      << 
3465                 if (page_copied)              << 
3466                         free_swap_cache(old_f << 
3467                 folio_put(old_folio);         << 
3468         }                                     << 
3469                                                  2606 
3470         delayacct_wpcopy_end();               !! 2607         pte_unmap_unlock(vmf->pte, vmf->ptl);
3471         return 0;                             !! 2608         /*
                                                   >> 2609          * No need to double call mmu_notifier->invalidate_range() callback as
                                                   >> 2610          * the above ptep_clear_flush_notify() did already call it.
                                                   >> 2611          */
                                                   >> 2612         mmu_notifier_invalidate_range_only_end(mm, mmun_start, mmun_end);
                                                   >> 2613         if (old_page) {
                                                   >> 2614                 /*
                                                   >> 2615                  * Don't let another task, with possibly unlocked vma,
                                                   >> 2616                  * keep the mlocked page.
                                                   >> 2617                  */
                                                   >> 2618                 if (page_copied && (vma->vm_flags & VM_LOCKED)) {
                                                   >> 2619                         lock_page(old_page);    /* LRU manipulation */
                                                   >> 2620                         if (PageMlocked(old_page))
                                                   >> 2621                                 munlock_vma_page(old_page);
                                                   >> 2622                         unlock_page(old_page);
                                                   >> 2623                 }
                                                   >> 2624                 put_page(old_page);
                                                   >> 2625         }
                                                   >> 2626         return page_copied ? VM_FAULT_WRITE : 0;
                                                   >> 2627 oom_free_new:
                                                   >> 2628         put_page(new_page);
3472 oom:                                             2629 oom:
3473         ret = VM_FAULT_OOM;                   !! 2630         if (old_page)
3474 out:                                          !! 2631                 put_page(old_page);
3475         if (old_folio)                        !! 2632         return VM_FAULT_OOM;
3476                 folio_put(old_folio);         << 
3477                                               << 
3478         delayacct_wpcopy_end();               << 
3479         return ret;                           << 
3480 }                                                2633 }
3481                                                  2634 
3482 /**                                              2635 /**
3483  * finish_mkwrite_fault - finish page fault f    2636  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3484  *                        writeable once the     2637  *                        writeable once the page is prepared
3485  *                                               2638  *
3486  * @vmf: structure describing the fault          2639  * @vmf: structure describing the fault
3487  * @folio: the folio of vmf->page             << 
3488  *                                               2640  *
3489  * This function handles all that is needed t    2641  * This function handles all that is needed to finish a write page fault in a
3490  * shared mapping due to PTE being read-only     2642  * shared mapping due to PTE being read-only once the mapped page is prepared.
3491  * It handles locking of PTE and modifying it !! 2643  * It handles locking of PTE and modifying it. The function returns
                                                   >> 2644  * VM_FAULT_WRITE on success, 0 when PTE got changed before we acquired PTE
                                                   >> 2645  * lock.
3492  *                                               2646  *
3493  * The function expects the page to be locked    2647  * The function expects the page to be locked or other protection against
3494  * concurrent faults / writeback (such as DAX    2648  * concurrent faults / writeback (such as DAX radix tree locks).
3495  *                                            << 
3496  * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before << 
3497  * we acquired PTE lock.                      << 
3498  */                                              2649  */
3499 static vm_fault_t finish_mkwrite_fault(struct !! 2650 int finish_mkwrite_fault(struct vm_fault *vmf)
3500 {                                                2651 {
3501         WARN_ON_ONCE(!(vmf->vma->vm_flags & V    2652         WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3502         vmf->pte = pte_offset_map_lock(vmf->v    2653         vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3503                                        &vmf->    2654                                        &vmf->ptl);
3504         if (!vmf->pte)                        << 
3505                 return VM_FAULT_NOPAGE;       << 
3506         /*                                       2655         /*
3507          * We might have raced with another p    2656          * We might have raced with another page fault while we released the
3508          * pte_offset_map_lock.                  2657          * pte_offset_map_lock.
3509          */                                      2658          */
3510         if (!pte_same(ptep_get(vmf->pte), vmf !! 2659         if (!pte_same(*vmf->pte, vmf->orig_pte)) {
3511                 update_mmu_tlb(vmf->vma, vmf- << 
3512                 pte_unmap_unlock(vmf->pte, vm    2660                 pte_unmap_unlock(vmf->pte, vmf->ptl);
3513                 return VM_FAULT_NOPAGE;          2661                 return VM_FAULT_NOPAGE;
3514         }                                        2662         }
3515         wp_page_reuse(vmf, folio);            !! 2663         wp_page_reuse(vmf);
3516         return 0;                                2664         return 0;
3517 }                                                2665 }
3518                                                  2666 
3519 /*                                               2667 /*
3520  * Handle write page faults for VM_MIXEDMAP o    2668  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3521  * mapping                                       2669  * mapping
3522  */                                              2670  */
3523 static vm_fault_t wp_pfn_shared(struct vm_fau !! 2671 static int wp_pfn_shared(struct vm_fault *vmf)
3524 {                                                2672 {
3525         struct vm_area_struct *vma = vmf->vma    2673         struct vm_area_struct *vma = vmf->vma;
3526                                                  2674 
3527         if (vma->vm_ops && vma->vm_ops->pfn_m    2675         if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3528                 vm_fault_t ret;               !! 2676                 int ret;
3529                                                  2677 
3530                 pte_unmap_unlock(vmf->pte, vm    2678                 pte_unmap_unlock(vmf->pte, vmf->ptl);
3531                 ret = vmf_can_call_fault(vmf) << 
3532                 if (ret)                      << 
3533                         return ret;           << 
3534                                               << 
3535                 vmf->flags |= FAULT_FLAG_MKWR    2679                 vmf->flags |= FAULT_FLAG_MKWRITE;
3536                 ret = vma->vm_ops->pfn_mkwrit    2680                 ret = vma->vm_ops->pfn_mkwrite(vmf);
3537                 if (ret & (VM_FAULT_ERROR | V    2681                 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3538                         return ret;              2682                         return ret;
3539                 return finish_mkwrite_fault(v !! 2683                 return finish_mkwrite_fault(vmf);
3540         }                                        2684         }
3541         wp_page_reuse(vmf, NULL);             !! 2685         wp_page_reuse(vmf);
3542         return 0;                             !! 2686         return VM_FAULT_WRITE;
3543 }                                                2687 }
3544                                                  2688 
3545 static vm_fault_t wp_page_shared(struct vm_fa !! 2689 static int wp_page_shared(struct vm_fault *vmf)
3546         __releases(vmf->ptl)                     2690         __releases(vmf->ptl)
3547 {                                                2691 {
3548         struct vm_area_struct *vma = vmf->vma    2692         struct vm_area_struct *vma = vmf->vma;
3549         vm_fault_t ret = 0;                   << 
3550                                                  2693 
3551         folio_get(folio);                     !! 2694         get_page(vmf->page);
3552                                                  2695 
3553         if (vma->vm_ops && vma->vm_ops->page_    2696         if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3554                 vm_fault_t tmp;               !! 2697                 int tmp;
3555                                                  2698 
3556                 pte_unmap_unlock(vmf->pte, vm    2699                 pte_unmap_unlock(vmf->pte, vmf->ptl);
3557                 tmp = vmf_can_call_fault(vmf) !! 2700                 tmp = do_page_mkwrite(vmf);
3558                 if (tmp) {                    << 
3559                         folio_put(folio);     << 
3560                         return tmp;           << 
3561                 }                             << 
3562                                               << 
3563                 tmp = do_page_mkwrite(vmf, fo << 
3564                 if (unlikely(!tmp || (tmp &      2701                 if (unlikely(!tmp || (tmp &
3565                                       (VM_FAU    2702                                       (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3566                         folio_put(folio);     !! 2703                         put_page(vmf->page);
3567                         return tmp;              2704                         return tmp;
3568                 }                                2705                 }
3569                 tmp = finish_mkwrite_fault(vm !! 2706                 tmp = finish_mkwrite_fault(vmf);
3570                 if (unlikely(tmp & (VM_FAULT_    2707                 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3571                         folio_unlock(folio);  !! 2708                         unlock_page(vmf->page);
3572                         folio_put(folio);     !! 2709                         put_page(vmf->page);
3573                         return tmp;              2710                         return tmp;
3574                 }                                2711                 }
3575         } else {                                 2712         } else {
3576                 wp_page_reuse(vmf, folio);    !! 2713                 wp_page_reuse(vmf);
3577                 folio_lock(folio);            !! 2714                 lock_page(vmf->page);
3578         }                                        2715         }
3579         ret |= fault_dirty_shared_page(vmf);  !! 2716         fault_dirty_shared_page(vma, vmf->page);
3580         folio_put(folio);                     !! 2717         put_page(vmf->page);
3581                                                  2718 
3582         return ret;                           !! 2719         return VM_FAULT_WRITE;
3583 }                                             << 
3584                                               << 
3585 static bool wp_can_reuse_anon_folio(struct fo << 
3586                                     struct vm << 
3587 {                                             << 
3588         /*                                    << 
3589          * We could currently only reuse a subpage of a large folio if no << 
3590          * other subpages of the large folios are still mapped. However, << 
3591          * let's just consistently not reuse subpages even if we could << 
3592          * reuse in that scenario, and give back a large folio a bit << 
3593          * sooner.                            << 
3594          */                                   << 
3595         if (folio_test_large(folio))          << 
3596                 return false;                 << 
3597                                               << 
3598         /*                                    << 
3599          * We have to verify under folio lock: these early checks are << 
3600          * just an optimization to avoid locking the folio and freeing << 
3601          * the swapcache if there is little hope that we can reuse. << 
3602          *                                    << 
3603          * KSM doesn't necessarily raise the folio refcount. << 
3604          */                                   << 
3605         if (folio_test_ksm(folio) || folio_re << 
3606                 return false;                 << 
3607         if (!folio_test_lru(folio))           << 
3608                 /*                            << 
3609                  * We cannot easily detect+ha << 
3610                  * remote LRU caches or refer << 
3611                  */                           << 
3612                 lru_add_drain();              << 
3613         if (folio_ref_count(folio) > 1 + foli << 
3614                 return false;                 << 
3615         if (!folio_trylock(folio))            << 
3616                 return false;                 << 
3617         if (folio_test_swapcache(folio))      << 
3618                 folio_free_swap(folio);       << 
3619         if (folio_test_ksm(folio) || folio_re << 
3620                 folio_unlock(folio);          << 
3621                 return false;                 << 
3622         }                                     << 
3623         /*                                    << 
3624          * Ok, we've got the only folio reference from our mapping << 
3625          * and the folio is locked, it's dark out, and we're wearing << 
3626          * sunglasses. Hit it.                << 
3627          */                                   << 
3628         folio_move_anon_rmap(folio, vma);     << 
3629         folio_unlock(folio);                  << 
3630         return true;                          << 
3631 }                                                2720 }
3632                                                  2721 
3633 /*                                               2722 /*
3634  * This routine handles present pages, when   !! 2723  * This routine handles present pages, when users try to write
3635  * * users try to write to a shared page (FAU !! 2724  * to a shared page. It is done by copying the page to a new address
3636  * * GUP wants to take a R/O pin on a possibly shared anonymous page << 
3637  *   (FAULT_FLAG_UNSHARE)                     << 
3638  *                                            << 
3639  * It is done by copying the page to a new address and decrementing the << 
3640  * shared-page counter for the old page.      << 
3641  *                                               2726  *
3642  * Note that this routine assumes that the pr    2727  * Note that this routine assumes that the protection checks have been
3643  * done by the caller (the low-level page fau    2728  * done by the caller (the low-level page fault routine in most cases).
3644  * Thus, with FAULT_FLAG_WRITE, we can safely !! 2729  * Thus we can safely just mark it writable once we've done any necessary
3645  * done any necessary COW.                    !! 2730  * COW.
3646  *                                               2731  *
3647  * In case of FAULT_FLAG_WRITE, we also mark  !! 2732  * We also mark the page dirty at this point even though the page will
3648  * though the page will change only once the  !! 2733  * change only once the write actually happens. This avoids a few races,
3649  * avoids a few races, and potentially makes  !! 2734  * and potentially makes it more efficient.
3650  *                                               2735  *
3651  * We enter with non-exclusive mmap_lock (to  !! 2736  * We enter with non-exclusive mmap_sem (to exclude vma changes,
3652  * but allow concurrent faults), with pte bot    2737  * but allow concurrent faults), with pte both mapped and locked.
3653  * We return with mmap_lock still held, but p !! 2738  * We return with mmap_sem still held, but pte unmapped and unlocked.
3654  */                                              2739  */
3655 static vm_fault_t do_wp_page(struct vm_fault  !! 2740 static int do_wp_page(struct vm_fault *vmf)
3656         __releases(vmf->ptl)                     2741         __releases(vmf->ptl)
3657 {                                                2742 {
3658         const bool unshare = vmf->flags & FAU << 
3659         struct vm_area_struct *vma = vmf->vma    2743         struct vm_area_struct *vma = vmf->vma;
3660         struct folio *folio = NULL;           << 
3661         pte_t pte;                            << 
3662                                               << 
3663         if (likely(!unshare)) {               << 
3664                 if (userfaultfd_pte_wp(vma, p << 
3665                         if (!userfaultfd_wp_a << 
3666                                 pte_unmap_unl << 
3667                                 return handle << 
3668                         }                     << 
3669                                               << 
3670                         /*                    << 
3671                          * Nothing needed (cache flush, TLB invalidations, << 
3672                          * etc.) because we're only removing the uffd-wp bit, << 
3673                          * which is completely invisible to the user. << 
3674                          */                   << 
3675                         pte = pte_clear_uffd_ << 
3676                                               << 
3677                         set_pte_at(vma->vm_mm << 
3678                         /*                    << 
3679                          * Update this to be  << 
3680                          * handling           << 
3681                          */                   << 
3682                         vmf->orig_pte = pte;  << 
3683                 }                             << 
3684                                               << 
3685                 /*                            << 
3686                  * Userfaultfd write-protect  << 
3687                  * is flushed in this case be << 
3688                  */                           << 
3689                 if (unlikely(userfaultfd_wp(v << 
3690                              mm_tlb_flush_pen << 
3691                         flush_tlb_page(vmf->v << 
3692         }                                     << 
3693                                                  2744 
3694         vmf->page = vm_normal_page(vma, vmf->    2745         vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3695                                               !! 2746         if (!vmf->page) {
3696         if (vmf->page)                        << 
3697                 folio = page_folio(vmf->page) << 
3698                                               << 
3699         /*                                    << 
3700          * Shared mapping: we are guaranteed  << 
3701          * FAULT_FLAG_WRITE set at this point << 
3702          */                                   << 
3703         if (vma->vm_flags & (VM_SHARED | VM_M << 
3704                 /*                               2747                 /*
3705                  * VM_MIXEDMAP !pfn_valid() c    2748                  * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3706                  * VM_PFNMAP VMA.                2749                  * VM_PFNMAP VMA.
3707                  *                               2750                  *
3708                  * We should not cow pages in    2751                  * We should not cow pages in a shared writeable mapping.
3709                  * Just mark the pages writab    2752                  * Just mark the pages writable and/or call ops->pfn_mkwrite.
3710                  */                              2753                  */
3711                 if (!vmf->page)               !! 2754                 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                                   >> 2755                                      (VM_WRITE|VM_SHARED))
3712                         return wp_pfn_shared(    2756                         return wp_pfn_shared(vmf);
3713                 return wp_page_shared(vmf, fo !! 2757 
                                                   >> 2758                 pte_unmap_unlock(vmf->pte, vmf->ptl);
                                                   >> 2759                 return wp_page_copy(vmf);
3714         }                                        2760         }
3715                                                  2761 
3716         /*                                       2762         /*
3717          * Private mapping: create an exclusi !! 2763          * Take out anonymous pages first, anonymous shared vmas are
3718          * is impossible. We might miss VM_WR !! 2764          * not dirty accountable.
3719          *                                    << 
3720          * If we encounter a page that is mar << 
3721          * the page without further checks.   << 
3722          */                                      2765          */
3723         if (folio && folio_test_anon(folio) & !! 2766         if (PageAnon(vmf->page) && !PageKsm(vmf->page)) {
3724             (PageAnonExclusive(vmf->page) ||  !! 2767                 int total_map_swapcount;
3725                 if (!PageAnonExclusive(vmf->p !! 2768                 if (!trylock_page(vmf->page)) {
3726                         SetPageAnonExclusive( !! 2769                         get_page(vmf->page);
3727                 if (unlikely(unshare)) {      << 
3728                         pte_unmap_unlock(vmf-    2770                         pte_unmap_unlock(vmf->pte, vmf->ptl);
3729                         return 0;             !! 2771                         lock_page(vmf->page);
                                                   >> 2772                         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
                                                   >> 2773                                         vmf->address, &vmf->ptl);
                                                   >> 2774                         if (!pte_same(*vmf->pte, vmf->orig_pte)) {
                                                   >> 2775                                 unlock_page(vmf->page);
                                                   >> 2776                                 pte_unmap_unlock(vmf->pte, vmf->ptl);
                                                   >> 2777                                 put_page(vmf->page);
                                                   >> 2778                                 return 0;
                                                   >> 2779                         }
                                                   >> 2780                         put_page(vmf->page);
3730                 }                                2781                 }
3731                 wp_page_reuse(vmf, folio);    !! 2782                 if (reuse_swap_page(vmf->page, &total_map_swapcount)) {
3732                 return 0;                     !! 2783                         if (total_map_swapcount == 1) {
                                                   >> 2784                                 /*
                                                   >> 2785                                  * The page is all ours. Move it to
                                                   >> 2786                                  * our anon_vma so the rmap code will
                                                   >> 2787                                  * not search our parent or siblings.
                                                   >> 2788                                  * Protected against the rmap code by
                                                   >> 2789                                  * the page lock.
                                                   >> 2790                                  */
                                                   >> 2791                                 page_move_anon_rmap(vmf->page, vma);
                                                   >> 2792                         }
                                                   >> 2793                         unlock_page(vmf->page);
                                                   >> 2794                         wp_page_reuse(vmf);
                                                   >> 2795                         return VM_FAULT_WRITE;
                                                   >> 2796                 }
                                                   >> 2797                 unlock_page(vmf->page);
                                                   >> 2798         } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                                   >> 2799                                         (VM_WRITE|VM_SHARED))) {
                                                   >> 2800                 return wp_page_shared(vmf);
3733         }                                        2801         }
                                                   >> 2802 
3734         /*                                       2803         /*
3735          * Ok, we need to copy. Oh, well..       2804          * Ok, we need to copy. Oh, well..
3736          */                                      2805          */
3737         if (folio)                            !! 2806         get_page(vmf->page);
3738                 folio_get(folio);             << 
3739                                                  2807 
3740         pte_unmap_unlock(vmf->pte, vmf->ptl);    2808         pte_unmap_unlock(vmf->pte, vmf->ptl);
3741 #ifdef CONFIG_KSM                             << 
3742         if (folio && folio_test_ksm(folio))   << 
3743                 count_vm_event(COW_KSM);      << 
3744 #endif                                        << 
3745         return wp_page_copy(vmf);                2809         return wp_page_copy(vmf);
3746 }                                                2810 }
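/*
 * Editor's sketch, not part of memory.c: the private-COW path that
 * do_wp_page()/wp_page_copy() implement can be observed from userspace with
 * a MAP_PRIVATE file mapping -- read the page first (so it is mapped
 * read-only from the page cache), then write to it; the write fault is
 * served by a private copy and the backing file stays unchanged.  File name
 * and contents are invented; error handling is minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("cow-demo.txt", O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0 || write(fd, "original", 8) != 8)
		return 1;

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	volatile char first = p[0];	/* read fault: page mapped read-only */
	(void)first;
	p[0] = 'X';			/* write fault: wp handling copies the page */

	char buf[9] = { 0 };
	pread(fd, buf, 8, 0);
	printf("mapping sees \"%.8s\", file still holds \"%s\"\n", p, buf);

	munmap(p, 4096);
	close(fd);
	return 0;
}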
3747                                                  2811 
3748 static void unmap_mapping_range_vma(struct vm    2812 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3749                 unsigned long start_addr, uns    2813                 unsigned long start_addr, unsigned long end_addr,
3750                 struct zap_details *details)     2814                 struct zap_details *details)
3751 {                                                2815 {
3752         zap_page_range_single(vma, start_addr    2816         zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3753 }                                                2817 }
3754                                                  2818 
3755 static inline void unmap_mapping_range_tree(s    2819 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3756                                             p << 
3757                                             p << 
3758                                             s    2820                                             struct zap_details *details)
3759 {                                                2821 {
3760         struct vm_area_struct *vma;              2822         struct vm_area_struct *vma;
3761         pgoff_t vba, vea, zba, zea;              2823         pgoff_t vba, vea, zba, zea;
3762                                                  2824 
3763         vma_interval_tree_foreach(vma, root,  !! 2825         vma_interval_tree_foreach(vma, root,
                                                   >> 2826                         details->first_index, details->last_index) {
                                                   >> 2827 
3764                 vba = vma->vm_pgoff;             2828                 vba = vma->vm_pgoff;
3765                 vea = vba + vma_pages(vma) -     2829                 vea = vba + vma_pages(vma) - 1;
3766                 zba = max(first_index, vba);  !! 2830                 zba = details->first_index;
3767                 zea = min(last_index, vea);   !! 2831                 if (zba < vba)
                                                   >> 2832                         zba = vba;
                                                   >> 2833                 zea = details->last_index;
                                                   >> 2834                 if (zea > vea)
                                                   >> 2835                         zea = vea;
3768                                                  2836 
3769                 unmap_mapping_range_vma(vma,     2837                 unmap_mapping_range_vma(vma,
3770                         ((zba - vba) << PAGE_    2838                         ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3771                         ((zea - vba + 1) << P    2839                         ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3772                                 details);        2840                                 details);
3773         }                                        2841         }
3774 }                                                2842 }
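/*
 * Editor's sketch of the index arithmetic in unmap_mapping_range_tree()
 * above, as standalone C: clamp the requested page-index range to one vma's
 * window and convert back to virtual addresses.  The structure and the
 * numbers are invented for illustration only.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12

struct demo_vma {
	unsigned long vm_start;		/* virtual address of the first page */
	unsigned long vm_pgoff;		/* file offset of vm_start, in pages */
	unsigned long nr_pages;		/* size of the mapping, in pages */
};

int main(void)
{
	struct demo_vma vma = {
		.vm_start = 0x700000000000UL, .vm_pgoff = 4, .nr_pages = 16,
	};
	unsigned long first_index = 0, last_index = 7;	/* zap file pages 0..7 */

	unsigned long vba = vma.vm_pgoff;
	unsigned long vea = vba + vma.nr_pages - 1;
	unsigned long zba = first_index > vba ? first_index : vba;	/* max() */
	unsigned long zea = last_index < vea ? last_index : vea;	/* min() */

	printf("unmap virtual range [%#lx, %#lx)\n",
	       ((zba - vba) << DEMO_PAGE_SHIFT) + vma.vm_start,
	       ((zea - vba + 1) << DEMO_PAGE_SHIFT) + vma.vm_start);
	return 0;
}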
3775                                                  2843 
3776 /**                                              2844 /**
3777  * unmap_mapping_folio() - Unmap single folio << 
3778  * @folio: The locked folio to be unmapped.   << 
3779  *                                            << 
3780  * Unmap this folio from any userspace proces << 
3781  * Typically, for efficiency, the range of ne << 
3782  * unmapped by unmap_mapping_pages() or unmap << 
3783  * truncation or invalidation holds the lock  << 
3784  * the page has been remapped again: and then << 
3785  * to unmap it finally.                       << 
3786  */                                           << 
3787 void unmap_mapping_folio(struct folio *folio) << 
3788 {                                             << 
3789         struct address_space *mapping = folio << 
3790         struct zap_details details = { };     << 
3791         pgoff_t first_index;                  << 
3792         pgoff_t last_index;                   << 
3793                                               << 
3794         VM_BUG_ON(!folio_test_locked(folio)); << 
3795                                               << 
3796         first_index = folio->index;           << 
3797         last_index = folio_next_index(folio)  << 
3798                                               << 
3799         details.even_cows = false;            << 
3800         details.single_folio = folio;         << 
3801         details.zap_flags = ZAP_FLAG_DROP_MAR << 
3802                                               << 
3803         i_mmap_lock_read(mapping);            << 
3804         if (unlikely(!RB_EMPTY_ROOT(&mapping- << 
3805                 unmap_mapping_range_tree(&map << 
3806                                          last << 
3807         i_mmap_unlock_read(mapping);          << 
3808 }                                             << 
3809                                               << 
3810 /**                                           << 
3811  * unmap_mapping_pages() - Unmap pages from p    2845  * unmap_mapping_pages() - Unmap pages from processes.
3812  * @mapping: The address space containing pag    2846  * @mapping: The address space containing pages to be unmapped.
3813  * @start: Index of first page to be unmapped    2847  * @start: Index of first page to be unmapped.
3814  * @nr: Number of pages to be unmapped.  0 to    2848  * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
3815  * @even_cows: Whether to unmap even private     2849  * @even_cows: Whether to unmap even private COWed pages.
3816  *                                               2850  *
3817  * Unmap the pages in this address space from    2851  * Unmap the pages in this address space from any userspace process which
3818  * has them mmaped.  Generally, you want to r    2852  * has them mmaped.  Generally, you want to remove COWed pages as well when
3819  * a file is being truncated, but not when in    2853  * a file is being truncated, but not when invalidating pages from the page
3820  * cache.                                        2854  * cache.
3821  */                                              2855  */
3822 void unmap_mapping_pages(struct address_space    2856 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3823                 pgoff_t nr, bool even_cows)      2857                 pgoff_t nr, bool even_cows)
3824 {                                                2858 {
3825         struct zap_details details = { };        2859         struct zap_details details = { };
3826         pgoff_t first_index = start;          << 
3827         pgoff_t last_index = start + nr - 1;  << 
3828                                                  2860 
3829         details.even_cows = even_cows;        !! 2861         details.check_mapping = even_cows ? NULL : mapping;
3830         if (last_index < first_index)         !! 2862         details.first_index = start;
3831                 last_index = ULONG_MAX;       !! 2863         details.last_index = start + nr - 1;
                                                   >> 2864         if (details.last_index < details.first_index)
                                                   >> 2865                 details.last_index = ULONG_MAX;
3832                                                  2866 
3833         i_mmap_lock_read(mapping);            !! 2867         i_mmap_lock_write(mapping);
3834         if (unlikely(!RB_EMPTY_ROOT(&mapping-    2868         if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3835                 unmap_mapping_range_tree(&map !! 2869                 unmap_mapping_range_tree(&mapping->i_mmap, &details);
3836                                          last !! 2870         i_mmap_unlock_write(mapping);
3837         i_mmap_unlock_read(mapping);          << 
3838 }                                                2871 }
3839 EXPORT_SYMBOL_GPL(unmap_mapping_pages);       << 
3840                                                  2872 
3841 /**                                              2873 /**
3842  * unmap_mapping_range - unmap the portion of    2874  * unmap_mapping_range - unmap the portion of all mmaps in the specified
3843  * address_space corresponding to the specifi    2875  * address_space corresponding to the specified byte range in the underlying
3844  * file.                                         2876  * file.
3845  *                                               2877  *
3846  * @mapping: the address space containing mma    2878  * @mapping: the address space containing mmaps to be unmapped.
3847  * @holebegin: byte in first page to unmap, r    2879  * @holebegin: byte in first page to unmap, relative to the start of
3848  * the underlying file.  This will be rounded    2880  * the underlying file.  This will be rounded down to a PAGE_SIZE
3849  * boundary.  Note that this is different fro    2881  * boundary.  Note that this is different from truncate_pagecache(), which
3850  * must keep the partial page.  In contrast,     2882  * must keep the partial page.  In contrast, we must get rid of
3851  * partial pages.                                2883  * partial pages.
3852  * @holelen: size of prospective hole in byte    2884  * @holelen: size of prospective hole in bytes.  This will be rounded
3853  * up to a PAGE_SIZE boundary.  A holelen of     2885  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
3854  * end of the file.                              2886  * end of the file.
3855  * @even_cows: 1 when truncating a file, unma    2887  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3856  * but 0 when invalidating pagecache, don't t    2888  * but 0 when invalidating pagecache, don't throw away private data.
3857  */                                              2889  */
3858 void unmap_mapping_range(struct address_space    2890 void unmap_mapping_range(struct address_space *mapping,
3859                 loff_t const holebegin, loff_    2891                 loff_t const holebegin, loff_t const holelen, int even_cows)
3860 {                                                2892 {
3861         pgoff_t hba = (pgoff_t)(holebegin) >> !! 2893         pgoff_t hba = holebegin >> PAGE_SHIFT;
3862         pgoff_t hlen = ((pgoff_t)(holelen) +  !! 2894         pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3863                                                  2895 
3864         /* Check for overflow. */                2896         /* Check for overflow. */
3865         if (sizeof(holelen) > sizeof(hlen)) {    2897         if (sizeof(holelen) > sizeof(hlen)) {
3866                 long long holeend =              2898                 long long holeend =
3867                         (holebegin + holelen     2899                         (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3868                 if (holeend & ~(long long)ULO    2900                 if (holeend & ~(long long)ULONG_MAX)
3869                         hlen = ULONG_MAX - hb    2901                         hlen = ULONG_MAX - hba + 1;
3870         }                                        2902         }
3871                                                  2903 
3872         unmap_mapping_pages(mapping, hba, hle    2904         unmap_mapping_pages(mapping, hba, hlen, even_cows);
3873 }                                                2905 }
3874 EXPORT_SYMBOL(unmap_mapping_range);              2906 EXPORT_SYMBOL(unmap_mapping_range);
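/*
 * Editor's sketch of the userspace-visible effect of unmap_mapping_range():
 * truncating a file that is mmapped tears down the PTEs beyond the new EOF,
 * so a later access to that part of the mapping raises SIGBUS.  File name is
 * invented and error handling is minimal; behaviour assumes Linux semantics
 * for access beyond EOF in a shared file mapping.
 */
#include <fcntl.h>
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static sigjmp_buf env;

static void on_sigbus(int sig)
{
	(void)sig;
	siglongjmp(env, 1);
}

int main(void)
{
	int fd = open("trunc-demo.bin", O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0 || ftruncate(fd, 2 * 4096))
		return 1;

	char *p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	p[4096] = 'A';			/* second page is present and writable */
	ftruncate(fd, 4096);		/* truncation unmaps the second page */

	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_sigbus;
	sigaction(SIGBUS, &sa, NULL);

	if (sigsetjmp(env, 1) == 0) {
		p[4096] = 'B';		/* beyond EOF now: expect SIGBUS */
		puts("no fault (unexpected)");
	} else {
		puts("SIGBUS after truncate, as expected");
	}

	munmap(p, 2 * 4096);
	close(fd);
	return 0;
}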
3875                                                  2907 
3876 /*                                               2908 /*
3877  * Restore a potential device exclusive pte t !! 2909  * We enter with non-exclusive mmap_sem (to exclude vma changes,
3878  */                                           << 
3879 static vm_fault_t remove_device_exclusive_ent << 
3880 {                                             << 
3881         struct folio *folio = page_folio(vmf- << 
3882         struct vm_area_struct *vma = vmf->vma << 
3883         struct mmu_notifier_range range;      << 
3884         vm_fault_t ret;                       << 
3885                                               << 
3886         /*                                    << 
3887          * We need a reference to lock the fo << 
3888          * the PTL so a racing thread can rem << 
3889          * entry and unmap it. If the folio i << 
3890          * have been removed already. If it h << 
3891          * been re-allocated after being free << 
3892          * unlock it.                         << 
3893          */                                   << 
3894         if (!folio_try_get(folio))            << 
3895                 return 0;                     << 
3896                                               << 
3897         ret = folio_lock_or_retry(folio, vmf) << 
3898         if (ret) {                            << 
3899                 folio_put(folio);             << 
3900                 return ret;                   << 
3901         }                                     << 
3902         mmu_notifier_range_init_owner(&range, << 
3903                                 vma->vm_mm, v << 
3904                                 (vmf->address << 
3905         mmu_notifier_invalidate_range_start(& << 
3906                                               << 
3907         vmf->pte = pte_offset_map_lock(vma->v << 
3908                                 &vmf->ptl);   << 
3909         if (likely(vmf->pte && pte_same(ptep_ << 
3910                 restore_exclusive_pte(vma, vm << 
3911                                               << 
3912         if (vmf->pte)                         << 
3913                 pte_unmap_unlock(vmf->pte, vm << 
3914         folio_unlock(folio);                  << 
3915         folio_put(folio);                     << 
3916                                               << 
3917         mmu_notifier_invalidate_range_end(&ra << 
3918         return 0;                             << 
3919 }                                             << 
3920                                               << 
3921 static inline bool should_try_to_free_swap(st << 
3922                                            st << 
3923                                            un << 
3924 {                                             << 
3925         if (!folio_test_swapcache(folio))     << 
3926                 return false;                 << 
3927         if (mem_cgroup_swap_full(folio) || (v << 
3928             folio_test_mlocked(folio))        << 
3929                 return true;                  << 
3930         /*                                    << 
3931          * If we want to map a page that's in << 
3932          * have to detect via the refcount if << 
3933          * user. Try freeing the swapcache to << 
3934          * reference only in case it's likely << 
3935          */                                   << 
3936         return (fault_flags & FAULT_FLAG_WRIT << 
3937                 folio_ref_count(folio) == (1  << 
3938 }                                             << 
3939                                               << 
3940 static vm_fault_t pte_marker_clear(struct vm_ << 
3941 {                                             << 
3942         vmf->pte = pte_offset_map_lock(vmf->v << 
3943                                        vmf->a << 
3944         if (!vmf->pte)                        << 
3945                 return 0;                     << 
3946         /*                                    << 
3947          * Be careful so that we will only re << 
3948          * none pte.  Otherwise it means the  << 
3949          *                                    << 
3950          * This should also cover the case wh << 
3951          * quickly from a PTE_MARKER_UFFD_WP  << 
3952          * So is_pte_marker() check is not en << 
3953          */                                   << 
3954         if (pte_same(vmf->orig_pte, ptep_get( << 
3955                 pte_clear(vmf->vma->vm_mm, vm << 
3956         pte_unmap_unlock(vmf->pte, vmf->ptl); << 
3957         return 0;                             << 
3958 }                                             << 
3959                                               << 
3960 static vm_fault_t do_pte_missing(struct vm_fa << 
3961 {                                             << 
3962         if (vma_is_anonymous(vmf->vma))       << 
3963                 return do_anonymous_page(vmf) << 
3964         else                                  << 
3965                 return do_fault(vmf);         << 
3966 }                                             << 
3967                                               << 
3968 /*                                            << 
3969  * This is actually a page-missing access, bu << 
3970  * installed.  It means this pte was wr-prote << 
3971  */                                           << 
3972 static vm_fault_t pte_marker_handle_uffd_wp(s << 
3973 {                                             << 
3974         /*                                    << 
3975          * Just in case there're leftover spe << 
3976          * got unregistered - we can simply c << 
3977          */                                   << 
3978         if (unlikely(!userfaultfd_wp(vmf->vma << 
3979                 return pte_marker_clear(vmf); << 
3980                                               << 
3981         return do_pte_missing(vmf);           << 
3982 }                                             << 
3983                                               << 
3984 static vm_fault_t handle_pte_marker(struct vm << 
3985 {                                             << 
3986         swp_entry_t entry = pte_to_swp_entry( << 
3987         unsigned long marker = pte_marker_get << 
3988                                               << 
3989         /*                                    << 
3990          * PTE markers should never be empty. << 
3991          * the best thing to do is to kill th << 
3992          */                                   << 
3993         if (WARN_ON_ONCE(!marker))            << 
3994                 return VM_FAULT_SIGBUS;       << 
3995                                               << 
3996         /* Higher priority than uffd-wp when  << 
3997         if (marker & PTE_MARKER_POISONED)     << 
3998                 return VM_FAULT_HWPOISON;     << 
3999                                               << 
4000         if (pte_marker_entry_uffd_wp(entry))  << 
4001                 return pte_marker_handle_uffd << 
4002                                               << 
4003         /* This is an unknown pte marker */   << 
4004         return VM_FAULT_SIGBUS;               << 
4005 }                                             << 
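/*
 * Editor's sketch of what produces the uffd-wp state handled above: arming
 * userfaultfd write-protection on a range from userspace.  This is only the
 * arming side (no fault-reading thread), so nothing writes to the range
 * afterwards.  Requires a kernel built with userfaultfd write-protect
 * support and permission to create a userfaultfd; whether the ioctls succeed
 * depends on the running kernel, so treat this as an assumption-laden
 * illustration rather than reference usage.
 */
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = { .api = UFFD_API, .features = 0 };

	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
		return 1;

	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	memset(p, 0, len);		/* populate the pages first */

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)p, .len = len },
		.mode  = UFFDIO_REGISTER_MODE_WP,
	};
	struct uffdio_writeprotect wp = {
		.range = { .start = (unsigned long)p, .len = len },
		.mode  = UFFDIO_WRITEPROTECT_MODE_WP,
	};
	if (ioctl(uffd, UFFDIO_REGISTER, &reg) ||
	    ioctl(uffd, UFFDIO_WRITEPROTECT, &wp))
		return 1;

	puts("range is now userfaultfd write-protected");
	return 0;
}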
4006                                               << 
4007 static struct folio *__alloc_swap_folio(struc << 
4008 {                                             << 
4009         struct vm_area_struct *vma = vmf->vma << 
4010         struct folio *folio;                  << 
4011         swp_entry_t entry;                    << 
4012                                               << 
4013         folio = vma_alloc_folio(GFP_HIGHUSER_ << 
4014                                 vmf->address, << 
4015         if (!folio)                           << 
4016                 return NULL;                  << 
4017                                               << 
4018         entry = pte_to_swp_entry(vmf->orig_pt << 
4019         if (mem_cgroup_swapin_charge_folio(fo << 
4020                                            GF << 
4021                 folio_put(folio);             << 
4022                 return NULL;                  << 
4023         }                                     << 
4024                                               << 
4025         return folio;                         << 
4026 }                                             << 
4027                                               << 
4028 #ifdef CONFIG_TRANSPARENT_HUGEPAGE            << 
4029 static inline int non_swapcache_batch(swp_ent << 
4030 {                                             << 
4031         struct swap_info_struct *si = swp_swa << 
4032         pgoff_t offset = swp_offset(entry);   << 
4033         int i;                                << 
4034                                               << 
4035         /*                                    << 
4036          * While allocating a large folio and << 
4037          * the case the being faulted pte doe << 
4038          * ensure all PTEs have no cache as w << 
4039          * swap devices while the content is  << 
4040          */                                   << 
4041         for (i = 0; i < max_nr; i++) {        << 
4042                 if ((si->swap_map[offset + i] << 
4043                         return i;             << 
4044         }                                     << 
4045                                               << 
4046         return i;                             << 
4047 }                                             << 
4048                                               << 
4049 /*                                            << 
4050  * Check if the PTEs within a range are conti << 
4051  * and have consistent swapcache, zeromap.    << 
4052  */                                           << 
4053 static bool can_swapin_thp(struct vm_fault *v << 
4054 {                                             << 
4055         unsigned long addr;                   << 
4056         swp_entry_t entry;                    << 
4057         int idx;                              << 
4058         pte_t pte;                            << 
4059                                               << 
4060         addr = ALIGN_DOWN(vmf->address, nr_pa << 
4061         idx = (vmf->address - addr) / PAGE_SI << 
4062         pte = ptep_get(ptep);                 << 
4063                                               << 
4064         if (!pte_same(pte, pte_move_swp_offse << 
4065                 return false;                 << 
4066         entry = pte_to_swp_entry(pte);        << 
4067         if (swap_pte_batch(ptep, nr_pages, pt << 
4068                 return false;                 << 
4069                                               << 
4070         /*                                    << 
4071          * swap_read_folio() can't handle the << 
4072          * from different backends. And they  << 
4073          * things might be added once zswap s << 
4074          */                                   << 
4075         if (unlikely(swap_zeromap_batch(entry << 
4076                 return false;                 << 
4077         if (unlikely(non_swapcache_batch(entr << 
4078                 return false;                 << 
4079                                               << 
4080         return true;                          << 
4081 }                                             << 
4082                                               << 
4083 static inline unsigned long thp_swap_suitable << 
4084                                               << 
4085                                               << 
4086 {                                             << 
4087         int order, nr;                        << 
4088                                               << 
4089         order = highest_order(orders);        << 
4090                                               << 
4091         /*                                    << 
4092          * To swap in a THP with nr pages, we << 
4093          * is aligned with that number, as it << 
4094          * This helps filter out most invalid << 
4095          */                                   << 
4096         while (orders) {                      << 
4097                 nr = 1 << order;              << 
4098                 if ((addr >> PAGE_SHIFT) % nr << 
4099                         break;                << 
4100                 order = next_order(&orders, o << 
4101         }                                     << 
4102                                               << 
4103         return orders;                        << 
4104 }                                             << 
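/*
 * Editor's sketch of the order filtering above, outside the kernel: a fault
 * address can only be swapped in as a folio of 2^order pages if its page
 * index is aligned to that order.  The address and the candidate orders are
 * invented for illustration.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12

int main(void)
{
	unsigned long addr = 0x7f1234456000UL;
	unsigned long page_idx = addr >> DEMO_PAGE_SHIFT;

	for (int order = 4; order >= 0; order--) {
		unsigned long nr = 1UL << order;

		printf("order %d (%2lu pages): %s\n", order, nr,
		       (page_idx % nr) == 0 ? "suitable" : "misaligned");
	}
	return 0;
}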
4105                                               << 
4106 static struct folio *alloc_swap_folio(struct  << 
4107 {                                             << 
4108         struct vm_area_struct *vma = vmf->vma << 
4109         unsigned long orders;                 << 
4110         struct folio *folio;                  << 
4111         unsigned long addr;                   << 
4112         swp_entry_t entry;                    << 
4113         spinlock_t *ptl;                      << 
4114         pte_t *pte;                           << 
4115         gfp_t gfp;                            << 
4116         int order;                            << 
4117                                               << 
4118         /*                                    << 
4119          * If uffd is active for the vma we n << 
4120          * maintain the uffd semantics.       << 
4121          */                                   << 
4122         if (unlikely(userfaultfd_armed(vma))) << 
4123                 goto fallback;                << 
4124                                               << 
4125         /*                                    << 
4126          * A large swapped out folio could be << 
4127          * lack handling for such cases, so f << 
4128          * folio.                             << 
4129          */                                   << 
4130         if (!zswap_never_enabled())           << 
4131                 goto fallback;                << 
4132                                               << 
4133         entry = pte_to_swp_entry(vmf->orig_pt << 
4134         /*                                    << 
4135          * Get a list of all the (large) orde << 
4136          * and suitable for swapping THP.     << 
4137          */                                   << 
4138         orders = thp_vma_allowable_orders(vma << 
4139                         TVA_IN_PF | TVA_ENFOR << 
4140         orders = thp_vma_suitable_orders(vma, << 
4141         orders = thp_swap_suitable_orders(swp << 
4142                                           vmf << 
4143                                               << 
4144         if (!orders)                          << 
4145                 goto fallback;                << 
4146                                               << 
4147         pte = pte_offset_map_lock(vmf->vma->v << 
4148                                   vmf->addres << 
4149         if (unlikely(!pte))                   << 
4150                 goto fallback;                << 
4151                                               << 
4152         /*                                    << 
4153          * For do_swap_page, find the highest << 
4154          * completely swap entries with conti << 
4155          */                                   << 
4156         order = highest_order(orders);        << 
4157         while (orders) {                      << 
4158                 addr = ALIGN_DOWN(vmf->addres << 
4159                 if (can_swapin_thp(vmf, pte + << 
4160                         break;                << 
4161                 order = next_order(&orders, o << 
4162         }                                     << 
4163                                               << 
4164         pte_unmap_unlock(pte, ptl);           << 
4165                                               << 
4166         /* Try allocating the highest of the  << 
4167         gfp = vma_thp_gfp_mask(vma);          << 
4168         while (orders) {                      << 
4169                 addr = ALIGN_DOWN(vmf->addres << 
4170                 folio = vma_alloc_folio(gfp,  << 
4171                 if (folio) {                  << 
4172                         if (!mem_cgroup_swapi << 
4173                                               << 
4174                                 return folio; << 
4175                         folio_put(folio);     << 
4176                 }                             << 
4177                 order = next_order(&orders, o << 
4178         }                                     << 
4179                                               << 
4180 fallback:                                     << 
4181         return __alloc_swap_folio(vmf);       << 
4182 }                                             << 
4183 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */      << 
4184 static struct folio *alloc_swap_folio(struct  << 
4185 {                                             << 
4186         return __alloc_swap_folio(vmf);       << 
4187 }                                             << 
4188 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */      << 
4189                                               << 
4190 static DECLARE_WAIT_QUEUE_HEAD(swapcache_wq); << 
4191                                               << 
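/*
 * Editor's sketch of exercising the swap-in path implemented by
 * do_swap_page() below: populate an anonymous region, ask the kernel to
 * reclaim it with MADV_PAGEOUT (only effective when swap is configured),
 * then touch it again and compare the major-fault counters.  Whether the
 * pages really reach swap depends on system state, so the numbers are only
 * indicative.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

#ifndef MADV_PAGEOUT
#define MADV_PAGEOUT 21		/* older libc headers may lack this */
#endif

int main(void)
{
	size_t len = 64UL << 20;	/* 64 MiB of anonymous memory */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	memset(p, 0xaa, len);		/* fault the pages in */

	madvise(p, len, MADV_PAGEOUT);	/* try to push them out to swap */

	struct rusage before, after;
	volatile unsigned long sum = 0;

	getrusage(RUSAGE_SELF, &before);
	for (size_t i = 0; i < len; i += 4096)
		sum += (unsigned char)p[i];	/* swapped pages major-fault back in */
	getrusage(RUSAGE_SELF, &after);

	printf("major faults: %ld -> %ld\n", before.ru_majflt, after.ru_majflt);
	(void)sum;
	munmap(p, len);
	return 0;
}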
4192 /*                                            << 
4193  * We enter with non-exclusive mmap_lock (to  << 
4194  * but allow concurrent faults), and pte mapp    2910  * but allow concurrent faults), and pte mapped but not yet locked.
4195  * We return with pte unmapped and unlocked.     2911  * We return with pte unmapped and unlocked.
4196  *                                               2912  *
4197  * We return with the mmap_lock locked or unl !! 2913  * We return with the mmap_sem locked or unlocked in the same cases
4198  * as does filemap_fault().                      2914  * as does filemap_fault().
4199  */                                              2915  */
4200 vm_fault_t do_swap_page(struct vm_fault *vmf) !! 2916 int do_swap_page(struct vm_fault *vmf)
4201 {                                                2917 {
4202         struct vm_area_struct *vma = vmf->vma    2918         struct vm_area_struct *vma = vmf->vma;
4203         struct folio *swapcache, *folio = NUL !! 2919         struct page *page = NULL, *swapcache;
4204         DECLARE_WAITQUEUE(wait, current);     !! 2920         struct mem_cgroup *memcg;
4205         struct page *page;                    << 
4206         struct swap_info_struct *si = NULL;   << 
4207         rmap_t rmap_flags = RMAP_NONE;        << 
4208         bool need_clear_cache = false;        << 
4209         bool exclusive = false;               << 
4210         swp_entry_t entry;                       2921         swp_entry_t entry;
4211         pte_t pte;                               2922         pte_t pte;
4212         vm_fault_t ret = 0;                   !! 2923         int locked;
4213         void *shadow = NULL;                  !! 2924         int exclusive = 0;
4214         int nr_pages;                         !! 2925         int ret = 0;
4215         unsigned long page_idx;               << 
4216         unsigned long address;                << 
4217         pte_t *ptep;                          << 
4218                                                  2926 
4219         if (!pte_unmap_same(vmf))             !! 2927         if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
4220                 goto out;                        2928                 goto out;
4221                                                  2929 
4222         entry = pte_to_swp_entry(vmf->orig_pt    2930         entry = pte_to_swp_entry(vmf->orig_pte);
4223         if (unlikely(non_swap_entry(entry)))     2931         if (unlikely(non_swap_entry(entry))) {
4224                 if (is_migration_entry(entry)    2932                 if (is_migration_entry(entry)) {
4225                         migration_entry_wait(    2933                         migration_entry_wait(vma->vm_mm, vmf->pmd,
4226                                                  2934                                              vmf->address);
4227                 } else if (is_device_exclusiv << 
4228                         vmf->page = pfn_swap_ << 
4229                         ret = remove_device_e << 
4230                 } else if (is_device_private_    2935                 } else if (is_device_private_entry(entry)) {
4231                         if (vmf->flags & FAUL << 
4232                                 /*            << 
4233                                  * migrate_to << 
4234                                  * under VMA  << 
4235                                  */           << 
4236                                 vma_end_read( << 
4237                                 ret = VM_FAUL << 
4238                                 goto out;     << 
4239                         }                     << 
4240                                               << 
4241                         vmf->page = pfn_swap_ << 
4242                         vmf->pte = pte_offset << 
4243                                         vmf-> << 
4244                         if (unlikely(!vmf->pt << 
4245                                      !pte_sam << 
4246                                               << 
4247                                 goto unlock;  << 
4248                                               << 
4249                         /*                       2936                         /*
4250                          * Get a page referen !! 2937                          * For un-addressable device memory we call the pgmap
4251                          * freed.             !! 2938                          * fault handler callback. The callback must migrate
                                                   >> 2939                          * the page back to some CPU accessible page.
4252                          */                      2940                          */
4253                         get_page(vmf->page);  !! 2941                         ret = device_private_entry_fault(vma, vmf->address, entry,
4254                         pte_unmap_unlock(vmf- !! 2942                                                  vmf->flags, vmf->pmd);
4255                         ret = vmf->page->pgma << 
4256                         put_page(vmf->page);  << 
4257                 } else if (is_hwpoison_entry(    2943                 } else if (is_hwpoison_entry(entry)) {
4258                         ret = VM_FAULT_HWPOIS    2944                         ret = VM_FAULT_HWPOISON;
4259                 } else if (is_pte_marker_entr << 
4260                         ret = handle_pte_mark << 
4261                 } else {                         2945                 } else {
4262                         print_bad_pte(vma, vm    2946                         print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
4263                         ret = VM_FAULT_SIGBUS    2947                         ret = VM_FAULT_SIGBUS;
4264                 }                                2948                 }
4265                 goto out;                        2949                 goto out;
4266         }                                        2950         }
4267                                                  2951 
4268         /* Prevent swapoff from happening to  << 
4269         si = get_swap_device(entry);          << 
4270         if (unlikely(!si))                    << 
4271                 goto out;                     << 
4272                                               << 
4273         folio = swap_cache_get_folio(entry, v << 
4274         if (folio)                            << 
4275                 page = folio_file_page(folio, << 
4276         swapcache = folio;                    << 
4277                                               << 
4278         if (!folio) {                         << 
4279                 if (data_race(si->flags & SWP << 
4280                     __swap_count(entry) == 1) << 
4281                         /* skip swapcache */  << 
4282                         folio = alloc_swap_fo << 
4283                         if (folio) {          << 
4284                                 __folio_set_l << 
4285                                 __folio_set_s << 
4286                                               << 
4287                                 nr_pages = fo << 
4288                                 if (folio_tes << 
4289                                         entry << 
4290                                 /*            << 
4291                                  * Prevent pa << 
4292                                  * the cache  << 
4293                                  * may finish << 
4294                                  * swapout re << 
4295                                  * undetectab << 
4296                                  * to entry r << 
4297                                  */           << 
4298                                 if (swapcache << 
4299                                         /*    << 
4300                                          * Re << 
4301                                          * re << 
4302                                          */   << 
4303                                         add_w << 
4304                                         sched << 
4305                                         remov << 
4306                                         goto  << 
4307                                 }             << 
4308                                 need_clear_ca << 
4309                                               << 
4310                                 mem_cgroup_sw << 
4311                                                  2952 
4312                                 shadow = get_ !! 2953         delayacct_set_flag(DELAYACCT_PF_SWAPIN);
4313                                 if (shadow)   !! 2954         page = lookup_swap_cache(entry, vma, vmf->address);
4314                                         worki !! 2955         swapcache = page;
4315                                                  2956 
4316                                 folio_add_lru !! 2957         if (!page) {
                                                   >> 2958                 struct swap_info_struct *si = swp_swap_info(entry);
4317                                                  2959 
4318                                 /* To provide !! 2960                 if (si->flags & SWP_SYNCHRONOUS_IO &&
4319                                 folio->swap = !! 2961                                 __swap_count(si, entry) == 1) {
4320                                 swap_read_fol !! 2962                         /* skip swapcache */
4321                                 folio->privat !! 2963                         page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
                                                   >> 2964                                                         vmf->address);
                                                   >> 2965                         if (page) {
                                                   >> 2966                                 __SetPageLocked(page);
                                                   >> 2967                                 __SetPageSwapBacked(page);
                                                   >> 2968                                 set_page_private(page, entry.val);
                                                   >> 2969                                 lru_cache_add_anon(page);
                                                   >> 2970                                 swap_readpage(page, true);
4322                         }                        2971                         }
4323                 } else {                         2972                 } else {
4324                         folio = swapin_readah !! 2973                         page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
4325                                                  2974                                                 vmf);
4326                         swapcache = folio;    !! 2975                         swapcache = page;
4327                 }                                2976                 }
4328                                                  2977 
4329                 if (!folio) {                 !! 2978                 if (!page) {
4330                         /*                       2979                         /*
4331                          * Back out if somebo    2980                          * Back out if somebody else faulted in this pte
4332                          * while we released     2981                          * while we released the pte lock.
4333                          */                      2982                          */
4334                         vmf->pte = pte_offset    2983                         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4335                                         vmf->    2984                                         vmf->address, &vmf->ptl);
4336                         if (likely(vmf->pte & !! 2985                         if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
4337                                    pte_same(p << 
4338                                 ret = VM_FAUL    2986                                 ret = VM_FAULT_OOM;
                                                   >> 2987                         delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
4339                         goto unlock;             2988                         goto unlock;
4340                 }                                2989                 }
4341                                                  2990 
4342                 /* Had to read the page from     2991                 /* Had to read the page from swap area: Major fault */
4343                 ret = VM_FAULT_MAJOR;            2992                 ret = VM_FAULT_MAJOR;
4344                 count_vm_event(PGMAJFAULT);      2993                 count_vm_event(PGMAJFAULT);
4345                 count_memcg_event_mm(vma->vm_    2994                 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
4346                 page = folio_file_page(folio, << 
4347         } else if (PageHWPoison(page)) {         2995         } else if (PageHWPoison(page)) {
4348                 /*                               2996                 /*
4349                  * hwpoisoned dirty swapcache    2997                  * hwpoisoned dirty swapcache pages are kept for killing
4350                  * owner processes (which may    2998                  * owner processes (which may be unknown at hwpoison time)
4351                  */                              2999                  */
4352                 ret = VM_FAULT_HWPOISON;         3000                 ret = VM_FAULT_HWPOISON;
                                                   >> 3001                 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
4353                 goto out_release;                3002                 goto out_release;
4354         }                                        3003         }
4355                                                  3004 
4356         ret |= folio_lock_or_retry(folio, vmf !! 3005         locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
4357         if (ret & VM_FAULT_RETRY)             << 
4358                 goto out_release;             << 
4359                                                  3006 
4360         if (swapcache) {                      !! 3007         delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
4361                 /*                            !! 3008         if (!locked) {
4362                  * Make sure folio_free_swap( !! 3009                 ret |= VM_FAULT_RETRY;
4363                  * swapcache from under us.   !! 3010                 goto out_release;
4364                  * below, are not enough to e !! 3011         }
4365                  * swapcache, we need to chec << 
4366                  * changed.                   << 
4367                  */                           << 
4368                 if (unlikely(!folio_test_swap << 
4369                              page_swap_entry( << 
4370                         goto out_page;        << 
4371                                                  3012 
4372                 /*                            !! 3013         /*
4373                  * KSM sometimes has to copy  !! 3014          * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
4374                  * page->index of !PageKSM()  !! 3015          * release the swapcache from under us.  The page pin, and pte_same
4375                  * anon VMA -- PageKSM() is l !! 3016          * test below, are not enough to exclude that.  Even if it is still
4376                  */                           !! 3017          * swapcache, we need to check that the page's swap has not changed.
4377                 folio = ksm_might_need_to_cop !! 3018          */
4378                 if (unlikely(!folio)) {       !! 3019         if (unlikely((!PageSwapCache(page) ||
4379                         ret = VM_FAULT_OOM;   !! 3020                         page_private(page) != entry.val)) && swapcache)
4380                         folio = swapcache;    !! 3021                 goto out_page;
4381                         goto out_page;        << 
4382                 } else if (unlikely(folio ==  << 
4383                         ret = VM_FAULT_HWPOIS << 
4384                         folio = swapcache;    << 
4385                         goto out_page;        << 
4386                 }                             << 
4387                 if (folio != swapcache)       << 
4388                         page = folio_page(fol << 
4389                                                  3022 
4390                 /*                            !! 3023         page = ksm_might_need_to_copy(page, vma, vmf->address);
4391                  * If we want to map a page t !! 3024         if (unlikely(!page)) {
4392                  * have to detect via the ref !! 3025                 ret = VM_FAULT_OOM;
4393                  * owner. Try removing the ex !! 3026                 page = swapcache;
4394                  * caches if required.        !! 3027                 goto out_page;
4395                  */                           << 
4396                 if ((vmf->flags & FAULT_FLAG_ << 
4397                     !folio_test_ksm(folio) && << 
4398                         lru_add_drain();      << 
4399         }                                        3028         }
4400                                                  3029 
4401         folio_throttle_swaprate(folio, GFP_KE !! 3030         if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
                                                   >> 3031                                 &memcg, false)) {
                                                   >> 3032                 ret = VM_FAULT_OOM;
                                                   >> 3033                 goto out_page;
                                                   >> 3034         }
4402                                                  3035 
4403         /*                                       3036         /*
4404          * Back out if somebody else already     3037          * Back out if somebody else already faulted in this pte.
4405          */                                      3038          */
4406         vmf->pte = pte_offset_map_lock(vma->v    3039         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4407                         &vmf->ptl);              3040                         &vmf->ptl);
4408         if (unlikely(!vmf->pte || !pte_same(p !! 3041         if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
4409                 goto out_nomap;                  3042                 goto out_nomap;
4410                                                  3043 
4411         if (unlikely(!folio_test_uptodate(fol !! 3044         if (unlikely(!PageUptodate(page))) {
4412                 ret = VM_FAULT_SIGBUS;           3045                 ret = VM_FAULT_SIGBUS;
4413                 goto out_nomap;                  3046                 goto out_nomap;
4414         }                                        3047         }
4415                                                  3048 
4416         /* allocated large folios for SWP_SYNCHRONOUS_IO */                        << 
4417         if (folio_test_large(folio) && !folio_test_swapcache(folio)) {             << 
4418                 unsigned long nr = folio_nr_pages(folio);                          << 
4419                 unsigned long folio_start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE); << 
4420                 unsigned long idx = (vmf->address - folio_start) / PAGE_SIZE;      << 
4421                 pte_t *folio_ptep = vmf->pte - idx;                                << 
4422                 pte_t folio_pte = ptep_get(folio_ptep);                            << 
4423                                                                                    << 
4424                 if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) || << 
4425                     swap_pte_batch(folio_ptep, nr, folio_pte) != nr)               << 
4426                         goto out_nomap;                                            << 
4427                                                                                    << 
4428                 page_idx = idx;                                                    << 
4429                 address = folio_start;                                             << 
4430                 ptep = folio_ptep;                                                 << 
4431                 goto check_folio;                                                  << 
4432         }                                                                          << 
4433                                                                                    << 
4434         nr_pages = 1;                                                              << 
4435         page_idx = 0;                                                              << 
4436         address = vmf->address;                                                    << 
4437         ptep = vmf->pte;                                                           << 
4438         if (folio_test_large(folio) && folio_test_swapcache(folio)) {              << 
4439                 int nr = folio_nr_pages(folio);                                    << 
4440                 unsigned long idx = folio_page_idx(folio, page);                   << 
4441                 unsigned long folio_start = address - idx * PAGE_SIZE;             << 
4442                 unsigned long folio_end = folio_start + nr * PAGE_SIZE;            << 
4443                 pte_t *folio_ptep;                                                 << 
4444                 pte_t folio_pte;                                                   << 
4445                                                                                    << 
4446                 if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start))) << 
4447                         goto check_folio;                                          << 
4448                 if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end)))      << 
4449                         goto check_folio;                                          << 
4450                                                                                    << 
4451                 folio_ptep = vmf->pte - idx;                                       << 
4452                 folio_pte = ptep_get(folio_ptep);                                  << 
4453                 if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) || << 
4454                     swap_pte_batch(folio_ptep, nr, folio_pte) != nr)               << 
4455                         goto check_folio;                                          << 
4456                                                                                    << 
4457                 page_idx = idx;                                                    << 
4458                 address = folio_start;                                             << 
4459                 ptep = folio_ptep;                                                 << 
4460                 nr_pages = nr;                                                     << 
4461                 entry = folio->swap;                                               << 
4462                 page = &folio->page;                                               << 
4463         }                                                                          << 
4464                                               << 
4465 check_folio:                                  << 
4466         /*                                    << 
4467          * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte     << 
4468          * must never point at an anonymous page in the swapcache that is          << 
4469          * PG_anon_exclusive. Sanity check that this holds and especially, that    << 
4470          * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity    << 
4471          * check after taking the PT lock and making sure that nobody              << 
4472          * concurrently faulted in this page and set PG_anon_exclusive.            << 
4473          */                                                                        << 
4474         BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));         << 
4475         BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));                 << 
4476                                                                                    << 
4477         /*                                                                         << 
4478          * Check under PT lock (to protect against concurrent changes of           << 
4479          * the swap entry concurrently) for certainly exclusive pages.             << 
4480          */                                                                        << 
4481         if (!folio_test_ksm(folio)) {                                              << 
4482                 exclusive = pte_swp_exclusive(vmf->orig_pte);                      << 
4483                 if (folio != swapcache) {                                          << 
4484                         /*                                                         << 
4485                          * We have a fresh page that is not exposed to the         << 
4486                          * swapcache -> certainly exclusive.                       << 
4487                          */                                                        << 
4488                         exclusive = true;                                          << 
4489                 } else if (exclusive && folio_test_writeback(folio) &&            << 
4490                           data_race(si->flags & SWP_STABLE_WRITES)) {             << 
4491                         /*                                                         << 
4492                          * This is tricky: not all swap backends support           << 
4493                          * concurrent page modifications while under writeback.    << 
4494                          *                                                         << 
4495                          * So if we stumble over such a page in the swapcache      << 
4496                          * we must not set the page exclusive, otherwise we can    << 
4497                          * map it writable without further checks and modify it    << 
4498                          * while still under writeback.                            << 
4499                          *                                                         << 
4500                          * For these problematic swap backends, simply drop the    << 
4501                          * exclusive marker: this is perfectly fine as we start    << 
4502                          * writeback only if we fully unmapped the page and        << 
4503                          * there are no unexpected references on the page after    << 
4504                          * unmapping succeeded. After fully unmapped, no           << 
4505                          * further GUP references (FOLL_GET and FOLL_PIN) can      << 
4506                          * appear, so dropping the exclusive marker and mapping    << 
4507                          * it only R/O is fine.                                    << 
4508                          */                                                        << 
4509                         exclusive = false;                                         << 
4510                 }                                                                  << 
4511         }                                                                          << 
4512                                                                                    << 
4513         /*                                                                         << 
4514          * Some architectures may have to restore extra metadata to the page       << 
4515          * when reading from swap. This metadata may be indexed by swap entry      << 
4516          * so this must be called before swap_free().                              << 
4517          */                                                                        << 
4518         arch_swap_restore(folio_swap(entry, folio), folio);                        << 
4519                                               << 
4520         /*                                       3049         /*
4521          * Remove the swap entry and conditionally try to free up the swapcache.   !! 3050          * The page isn't present yet, go ahead with the fault.
4522          * We're already holding a reference on the page but haven't mapped it     !! 3051          *
4523          * yet.                                                                     !! 3052          * Be careful about the sequence of operations here.
                                                   >> 3053          * To get its accounting right, reuse_swap_page() must be called
                                                   >> 3054          * while the page is counted on swap but not yet in mapcount i.e.
                                                   >> 3055          * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
                                                   >> 3056          * must be called after the swap_free(), or it will never succeed.
4524          */                                      3057          */
4525         swap_free_nr(entry, nr_pages);        << 
4526         if (should_try_to_free_swap(folio, vma, vmf->flags))                       << 
4527                 folio_free_swap(folio);       << 
4528                                                  3058 
4529         add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);                        !! 3059         inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
4530         add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);                        !! 3060         dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
4531         pte = mk_pte(page, vma->vm_page_prot)    3061         pte = mk_pte(page, vma->vm_page_prot);
                                                   >> 3062         if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
                                                   >> 3063                 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
                                                   >> 3064                 vmf->flags &= ~FAULT_FLAG_WRITE;
                                                   >> 3065                 ret |= VM_FAULT_WRITE;
                                                   >> 3066                 exclusive = RMAP_EXCLUSIVE;
                                                   >> 3067         }
                                                   >> 3068         flush_icache_page(vma, page);
4532         if (pte_swp_soft_dirty(vmf->orig_pte)    3069         if (pte_swp_soft_dirty(vmf->orig_pte))
4533                 pte = pte_mksoft_dirty(pte);     3070                 pte = pte_mksoft_dirty(pte);
4534         if (pte_swp_uffd_wp(vmf->orig_pte))   !! 3071         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
4535                 pte = pte_mkuffd_wp(pte);     !! 3072         arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
4536                                               !! 3073         vmf->orig_pte = pte;
4537         /*                                                                         << 
4538          * Same logic as in do_wp_page(); however, optimize for pages that are     << 
4539          * certainly not shared either because we just allocated them without      << 
4540          * exposing them to the swapcache or because the swap entry indicates      << 
4541          * exclusivity.                                                            << 
4542          */                                                                        << 
4543         if (!folio_test_ksm(folio) &&                                              << 
4544             (exclusive || folio_ref_count(folio) == 1)) {                          << 
4545                 if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) && << 
4546                     !pte_needs_soft_dirty_wp(vma, pte)) {                          << 
4547                         pte = pte_mkwrite(pte, vma);                               << 
4548                         if (vmf->flags & FAULT_FLAG_WRITE) {                       << 
4549                                 pte = pte_mkdirty(pte);                            << 
4550                                 vmf->flags &= ~FAULT_FLAG_WRITE;                   << 
4551                         }                                                          << 
4552                 }                                                                  << 
4553                 rmap_flags |= RMAP_EXCLUSIVE;                                      << 
4554         }                                                                          << 
4555         folio_ref_add(folio, nr_pages - 1);                                        << 
4556         flush_icache_pages(vma, page, nr_pages);                                   << 
4557         vmf->orig_pte = pte_advance_pfn(pte, page_idx);                            << 
4558                                                  3074 
4559         /* ksm created a completely new copy     3075         /* ksm created a completely new copy */
4560         if (unlikely(folio != swapcache && swapcache)) {                           !! 3076         if (unlikely(page != swapcache && swapcache)) {
4561                 folio_add_new_anon_rmap(folio !! 3077                 page_add_new_anon_rmap(page, vma, vmf->address, false);
4562                 folio_add_lru_vma(folio, vma) !! 3078                 mem_cgroup_commit_charge(page, memcg, false, false);
4563         } else if (!folio_test_anon(folio)) { !! 3079                 lru_cache_add_active_or_unevictable(page, vma);
4564                 /*                            << 
4565                  * We currently only expect s << 
4566                  * fully exclusive or fully s << 
4567                  * folios which are fully exc << 
4568                  * folios within swapcache he << 
4569                  */                           << 
4570                 VM_WARN_ON_ONCE(folio_test_la << 
4571                 VM_WARN_ON_FOLIO(!folio_test_ << 
4572                 folio_add_new_anon_rmap(folio << 
4573         } else {                                 3080         } else {
4574                 folio_add_anon_rmap_ptes(foli !! 3081                 do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
4575                                         rmap_ !! 3082                 mem_cgroup_commit_charge(page, memcg, true, false);
                                                   >> 3083                 activate_page(page);
4576         }                                        3084         }
4577                                                  3085 
4578         VM_BUG_ON(!folio_test_anon(folio) ||                                       !! 3086         swap_free(entry);
4579                         (pte_write(pte) && !PageAnonExclusive(page)));             !! 3087         if (mem_cgroup_swap_full(page) ||
4580         set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);                        !! 3088             (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
4581         arch_do_swap_page_nr(vma->vm_mm, vma, address,                             !! 3089                 try_to_free_swap(page);
4582                         pte, pte, nr_pages);                                       !! 3090         unlock_page(page);
4583                                               !! 3091         if (page != swapcache && swapcache) {
4584         folio_unlock(folio);                  << 
4585         if (folio != swapcache && swapcache)  << 
4586                 /*                               3092                 /*
4587                  * Hold the lock to avoid the    3093                  * Hold the lock to avoid the swap entry to be reused
4588                  * until we take the PT lock     3094                  * until we take the PT lock for the pte_same() check
4589                  * (to avoid false positives     3095                  * (to avoid false positives from pte_same). For
4590                  * further safety release the    3096                  * further safety release the lock after the swap_free
4591                  * so that the swap count won    3097                  * so that the swap count won't change under a
4592                  * parallel locked swapcache.    3098                  * parallel locked swapcache.
4593                  */                              3099                  */
4594                 folio_unlock(swapcache);      !! 3100                 unlock_page(swapcache);
4595                 folio_put(swapcache);         !! 3101                 put_page(swapcache);
4596         }                                        3102         }
4597                                                  3103 
4598         if (vmf->flags & FAULT_FLAG_WRITE) {     3104         if (vmf->flags & FAULT_FLAG_WRITE) {
4599                 ret |= do_wp_page(vmf);          3105                 ret |= do_wp_page(vmf);
4600                 if (ret & VM_FAULT_ERROR)        3106                 if (ret & VM_FAULT_ERROR)
4601                         ret &= VM_FAULT_ERROR    3107                         ret &= VM_FAULT_ERROR;
4602                 goto out;                        3108                 goto out;
4603         }                                        3109         }
4604                                                  3110 
4605         /* No need to invalidate - it was non-present before */                       3111         /* No need to invalidate - it was non-present before */
4606         update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);                 !! 3112         update_mmu_cache(vma, vmf->address, vmf->pte);
4607 unlock:                                          3113 unlock:
4608         if (vmf->pte)                         !! 3114         pte_unmap_unlock(vmf->pte, vmf->ptl);
4609                 pte_unmap_unlock(vmf->pte, vm << 
4610 out:                                             3115 out:
4611         /* Clear the swap cache pin for direct swapin after PTL unlock */          << 
4612         if (need_clear_cache) {                                                    << 
4613                 swapcache_clear(si, entry, nr_pages);                              << 
4614                 if (waitqueue_active(&swapcache_wq))                               << 
4615                         wake_up(&swapcache_wq);                                    << 
4616         }                                     << 
4617         if (si)                               << 
4618                 put_swap_device(si);          << 
4619         return ret;                              3116         return ret;
4620 out_nomap:                                       3117 out_nomap:
4621         if (vmf->pte)                         !! 3118         mem_cgroup_cancel_charge(page, memcg, false);
4622                 pte_unmap_unlock(vmf->pte, vm !! 3119         pte_unmap_unlock(vmf->pte, vmf->ptl);
4623 out_page:                                        3120 out_page:
4624         folio_unlock(folio);                  !! 3121         unlock_page(page);
4625 out_release:                                     3122 out_release:
4626         folio_put(folio);                     !! 3123         put_page(page);
4627         if (folio != swapcache && swapcache)  !! 3124         if (page != swapcache && swapcache) {
4628                 folio_unlock(swapcache);      !! 3125                 unlock_page(swapcache);
4629                 folio_put(swapcache);         !! 3126                 put_page(swapcache);
4630         }                                     << 
4631         if (need_clear_cache) {                                                    << 
4632                 swapcache_clear(si, entry, nr_pages);                              << 
4633                 if (waitqueue_active(&swapcache_wq))                               << 
4634                         wake_up(&swapcache_wq);                                    << 
4635         }                                        3127         }
4636         if (si)                               << 
4637                 put_swap_device(si);          << 
4638         return ret;                              3128         return ret;
4639 }                                                3129 }
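
The swap-in path handled by do_swap_page() above is easy to exercise from user space. The following standalone sketch is illustrative only (it is not part of memory.c): it assumes an active swap device and MADV_PAGEOUT support (Linux 5.4+), pushes an anonymous range out to swap, and then touches it again so the later reads fault back in through the swap path and the final store goes through the write/reuse logic.

/*
 * Standalone user-space sketch: force swap-out, then fault pages back in.
 * Build: gcc -O2 -o swapin_demo swapin_demo.c
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64UL << 20;			/* 64 MiB of anonymous memory */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0xaa, len);				/* populate: anonymous faults */

	/* Ask the kernel to reclaim the range; pages that reach swap will
	 * come back through the swap fault handler on the next access. */
	if (madvise(buf, len, MADV_PAGEOUT))
		perror("madvise(MADV_PAGEOUT)");

	long sum = 0;
	for (size_t i = 0; i < len; i += 4096)
		sum += buf[i];				/* read faults: swap-in */
	buf[0] = 1;					/* write fault on a swapped-in page */

	printf("checksum %ld\n", sum);
	munmap(buf, len);
	return 0;
}

Whether each access shows up as a major or minor fault (e.g. in /usr/bin/time -v or vmstat) depends on whether the page was still in the swap cache when it was touched.
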
4640                                                  3130 
4641 static bool pte_range_none(pte_t *pte, int nr_pages)                              << 
4642 {                                             << 
4643         int i;                                << 
4644                                               << 
4645         for (i = 0; i < nr_pages; i++) {      << 
4646                 if (!pte_none(ptep_get_lockless(pte + i)))                         << 
4647                         return false;         << 
4648         }                                     << 
4649                                               << 
4650         return true;                          << 
4651 }                                             << 
4652                                               << 
4653 static struct folio *alloc_anon_folio(struct vm_fault *vmf)                       << 
4654 {                                             << 
4655         struct vm_area_struct *vma = vmf->vma << 
4656 #ifdef CONFIG_TRANSPARENT_HUGEPAGE            << 
4657         unsigned long orders;                 << 
4658         struct folio *folio;                  << 
4659         unsigned long addr;                   << 
4660         pte_t *pte;                           << 
4661         gfp_t gfp;                            << 
4662         int order;                            << 
4663                                               << 
4664         /*                                    << 
4665          * If uffd is active for the vma we n << 
4666          * maintain the uffd semantics.       << 
4667          */                                   << 
4668         if (unlikely(userfaultfd_armed(vma))) << 
4669                 goto fallback;                << 
4670                                               << 
4671         /*                                    << 
4672          * Get a list of all the (large) orders below PMD_ORDER that are enabled   << 
4673          * for this vma. Then filter out the orders that can't be allocated over   << 
4674          * the faulting address and still be fully contained in the vma.           << 
4675          */                                                                        << 
4676         orders = thp_vma_allowable_orders(vma, vma->vm_flags,                      << 
4677                         TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);        << 
4678         orders = thp_vma_suitable_orders(vma, vmf->address, orders);               << 
4679                                               << 
4680         if (!orders)                          << 
4681                 goto fallback;                << 
4682                                               << 
4683         pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK);                   << 
4684         if (!pte)                             << 
4685                 return ERR_PTR(-EAGAIN);      << 
4686                                               << 
4687         /*                                    << 
4688          * Find the highest order where the a << 
4689          * pte_none(). Note that all remainin << 
4690          * pte_none().                        << 
4691          */                                   << 
4692         order = highest_order(orders);        << 
4693         while (orders) {                      << 
4694                 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);               << 
4695                 if (pte_range_none(pte + pte_index(addr), 1 << order))             << 
4696                         break;                << 
4697                 order = next_order(&orders, o << 
4698         }                                     << 
4699                                               << 
4700         pte_unmap(pte);                       << 
4701                                               << 
4702         if (!orders)                          << 
4703                 goto fallback;                << 
4704                                               << 
4705         /* Try allocating the highest of the  << 
4706         gfp = vma_thp_gfp_mask(vma);          << 
4707         while (orders) {                      << 
4708                 addr = ALIGN_DOWN(vmf->addres << 
4709                 folio = vma_alloc_folio(gfp,  << 
4710                 if (folio) {                  << 
4711                         if (mem_cgroup_charge << 
4712                                 count_mthp_st << 
4713                                 folio_put(fol << 
4714                                 goto next;    << 
4715                         }                     << 
4716                         folio_throttle_swapra << 
4717                         folio_zero_user(folio << 
4718                         return folio;         << 
4719                 }                             << 
4720 next:                                         << 
4721                 count_mthp_stat(order, MTHP_S << 
4722                 order = next_order(&orders, o << 
4723         }                                     << 
4724                                               << 
4725 fallback:                                     << 
4726 #endif                                        << 
4727         return folio_prealloc(vma->vm_mm, vma << 
4728 }                                             << 
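
The order-selection walk in alloc_anon_folio() above can be illustrated outside the kernel: try candidate orders from highest to lowest and pick the first one whose naturally aligned slot range around the faulting index is still empty, otherwise fall back to a single page. The sketch below is a hedged, standalone analogy; the 512-entry table and all names are illustrative, not kernel API.

/* Standalone sketch of "highest order whose aligned range is still free". */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static bool range_is_free(const bool *mapped, unsigned long start, unsigned long nr)
{
	for (unsigned long i = 0; i < nr; i++)
		if (mapped[start + i])
			return false;
	return true;
}

int main(void)
{
	bool mapped[512] = { false };		/* one PMD's worth of PTE slots */
	mapped[133] = true;			/* pretend a neighbour is already mapped */
	unsigned long addr = 128 * PAGE_SIZE;	/* faulting address within the table */
	unsigned long orders = (1UL << 4) | (1UL << 2) | (1UL << 0); /* enabled orders */

	for (int order = 4; order >= 0; order--) {
		if (!(orders & (1UL << order)))
			continue;
		unsigned long nr = 1UL << order;
		unsigned long start = (addr / PAGE_SIZE) & ~(nr - 1);	/* ALIGN_DOWN */
		if (range_is_free(mapped, start, nr)) {
			printf("would allocate an order-%d folio at index %lu\n",
			       order, start);
			return 0;
		}
	}
	printf("fall back to a single page\n");
	return 0;
}

With slot 133 occupied, order 4 (indices 128-143) is rejected and order 2 (128-131) is chosen, which mirrors how the kernel walks orders with highest_order()/next_order() and tests each aligned range with pte_range_none().
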
4729                                               << 
4730 /*                                               3131 /*
4731  * We enter with non-exclusive mmap_lock (to exclude vma changes,          !! 3132  * We enter with non-exclusive mmap_sem (to exclude vma changes,
4732  * but allow concurrent faults), and pte mapped but not yet locked.           3133  * but allow concurrent faults), and pte mapped but not yet locked.
4733  * We return with mmap_lock still held, but pte unmapped and unlocked.     !! 3134  * We return with mmap_sem still held, but pte unmapped and unlocked.
4734  */                                              3135  */
4735 static vm_fault_t do_anonymous_page(struct vm !! 3136 static int do_anonymous_page(struct vm_fault *vmf)
4736 {                                                3137 {
4737         struct vm_area_struct *vma = vmf->vma    3138         struct vm_area_struct *vma = vmf->vma;
4738         unsigned long addr = vmf->address;    !! 3139         struct mem_cgroup *memcg;
4739         struct folio *folio;                  !! 3140         struct page *page;
4740         vm_fault_t ret = 0;                   !! 3141         int ret = 0;
4741         int nr_pages = 1;                     << 
4742         pte_t entry;                             3142         pte_t entry;
4743                                                  3143 
4744         /* File mapping without ->vm_ops ? */    3144         /* File mapping without ->vm_ops ? */
4745         if (vma->vm_flags & VM_SHARED)           3145         if (vma->vm_flags & VM_SHARED)
4746                 return VM_FAULT_SIGBUS;          3146                 return VM_FAULT_SIGBUS;
4747                                                  3147 
4748         /*                                       3148         /*
4749          * Use pte_alloc() instead of pte_alloc_map(), so that OOM can       !! 3149          * Use pte_alloc() instead of pte_alloc_map().  We can't run
4750          * be distinguished from a transient failure of pte_offset_map().    !! 3150          * pte_offset_map() on pmds where a huge pmd might be created
                                                   >> 3151          * from a different thread.
                                                   >> 3152          *
                                                   >> 3153          * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
                                                   >> 3154          * parallel threads are excluded by other means.
                                                   >> 3155          *
                                                   >> 3156          * Here we only have down_read(mmap_sem).
4751          */                                      3157          */
4752         if (pte_alloc(vma->vm_mm, vmf->pmd))  !! 3158         if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))
4753                 return VM_FAULT_OOM;             3159                 return VM_FAULT_OOM;
4754                                                  3160 
                                                   >> 3161         /* See the comment in pte_alloc_one_map() */
                                                   >> 3162         if (unlikely(pmd_trans_unstable(vmf->pmd)))
                                                   >> 3163                 return 0;
                                                   >> 3164 
4755         /* Use the zero-page for reads */        3165         /* Use the zero-page for reads */
4756         if (!(vmf->flags & FAULT_FLAG_WRITE)     3166         if (!(vmf->flags & FAULT_FLAG_WRITE) &&
4757                         !mm_forbids_zeropage(    3167                         !mm_forbids_zeropage(vma->vm_mm)) {
4758                 entry = pte_mkspecial(pfn_pte    3168                 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
4759                                                  3169                                                 vma->vm_page_prot));
4760                 vmf->pte = pte_offset_map_loc    3170                 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4761                                 vmf->address,    3171                                 vmf->address, &vmf->ptl);
4762                 if (!vmf->pte)                !! 3172                 if (!pte_none(*vmf->pte))
4763                         goto unlock;          << 
4764                 if (vmf_pte_changed(vmf)) {   << 
4765                         update_mmu_tlb(vma, vmf->address, vmf->pte);          << 
4766                         goto unlock;             3173                         goto unlock;
4767                 }                             << 
4768                 ret = check_stable_address_sp    3174                 ret = check_stable_address_space(vma->vm_mm);
4769                 if (ret)                         3175                 if (ret)
4770                         goto unlock;             3176                         goto unlock;
4771                 /* Deliver the page fault to     3177                 /* Deliver the page fault to userland, check inside PT lock */
4772                 if (userfaultfd_missing(vma))    3178                 if (userfaultfd_missing(vma)) {
4773                         pte_unmap_unlock(vmf-    3179                         pte_unmap_unlock(vmf->pte, vmf->ptl);
4774                         return handle_userfau    3180                         return handle_userfault(vmf, VM_UFFD_MISSING);
4775                 }                                3181                 }
4776                 goto setpte;                     3182                 goto setpte;
4777         }                                        3183         }
4778                                                  3184 
4779         /* Allocate our own private page. */     3185         /* Allocate our own private page. */
4780         ret = vmf_anon_prepare(vmf);          !! 3186         if (unlikely(anon_vma_prepare(vma)))
4781         if (ret)                              !! 3187                 goto oom;
4782                 return ret;                   !! 3188         page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
4783         /* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */ << 
4784         folio = alloc_anon_folio(vmf);        << 
4785         if (IS_ERR(folio))                    << 
4786                 return 0;                     << 
4787         if (!folio)                           << 
4788                 goto oom;                        3190                 goto oom;
4789                                                  3191 
4790         nr_pages = folio_nr_pages(folio);     !! 3192         if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
4791         addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);                << 
4792                                                  3194 
4793         /*                                       3195         /*
4794          * The memory barrier inside __folio_mark_uptodate makes sure that    !! 3196          * The memory barrier inside __SetPageUptodate makes sure that
4795          * preceding stores to the page contents become visible before        !! 3197          * preceeding stores to the page contents become visible before
4796          * the set_pte_at() write.               3198          * the set_pte_at() write.
4797          */                                      3199          */
4798         __folio_mark_uptodate(folio);         !! 3200         __SetPageUptodate(page);
4799                                                  3201 
4800         entry = mk_pte(&folio->page, vma->vm_page_prot);                      !! 3202         entry = mk_pte(page, vma->vm_page_prot);
4801         entry = pte_sw_mkyoung(entry);        << 
4802         if (vma->vm_flags & VM_WRITE)            3203         if (vma->vm_flags & VM_WRITE)
4803                 entry = pte_mkwrite(pte_mkdirty(entry), vma);                 !! 3204                 entry = pte_mkwrite(pte_mkdirty(entry));
4804                                                  3205 
4805         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); !! 3206         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4806         if (!vmf->pte)                        !! 3207                         &vmf->ptl);
4807                 goto release;                 !! 3208         if (!pte_none(*vmf->pte))
4808         if (nr_pages == 1 && vmf_pte_changed(vmf)) {                          << 
4809                 update_mmu_tlb(vma, addr, vmf->pte);                          << 
4810                 goto release;                                                 << 
4811         } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {     << 
4812                 update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);          << 
4813                 goto release;                    3209                 goto release;
4814         }                                     << 
4815                                                  3210 
4816         ret = check_stable_address_space(vma-    3211         ret = check_stable_address_space(vma->vm_mm);
4817         if (ret)                                 3212         if (ret)
4818                 goto release;                    3213                 goto release;
4819                                                  3214 
4820         /* Deliver the page fault to userland    3215         /* Deliver the page fault to userland, check inside PT lock */
4821         if (userfaultfd_missing(vma)) {          3216         if (userfaultfd_missing(vma)) {
4822                 pte_unmap_unlock(vmf->pte, vm    3217                 pte_unmap_unlock(vmf->pte, vmf->ptl);
4823                 folio_put(folio);             !! 3218                 mem_cgroup_cancel_charge(page, memcg, false);
                                                   >> 3219                 put_page(page);
4824                 return handle_userfault(vmf,     3220                 return handle_userfault(vmf, VM_UFFD_MISSING);
4825         }                                        3221         }
4826                                                  3222 
4827         folio_ref_add(folio, nr_pages - 1);   !! 3223         inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
4828         add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);                   !! 3224         page_add_new_anon_rmap(page, vma, vmf->address, false);
4829         count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);      !! 3225         mem_cgroup_commit_charge(page, memcg, false, false);
4830         folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);            !! 3226         lru_cache_add_active_or_unevictable(page, vma);
4831         folio_add_lru_vma(folio, vma);        << 
4832 setpte:                                          3227 setpte:
4833         if (vmf_orig_pte_uffd_wp(vmf))        !! 3228         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
4834                 entry = pte_mkuffd_wp(entry); << 
4835         set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages);                << 
4836                                                  3229 
4837         /* No need to invalidate - it was non    3230         /* No need to invalidate - it was non-present before */
4838         update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages);           !! 3231         update_mmu_cache(vma, vmf->address, vmf->pte);
4839 unlock:                                          3232 unlock:
4840         if (vmf->pte)                         !! 3233         pte_unmap_unlock(vmf->pte, vmf->ptl);
4841                 pte_unmap_unlock(vmf->pte, vmf->ptl);                         << 
4842         return ret;                              3234         return ret;
4843 release:                                         3235 release:
4844         folio_put(folio);                     !! 3236         mem_cgroup_cancel_charge(page, memcg, false);
                                                   >> 3237         put_page(page);
4845         goto unlock;                             3238         goto unlock;
                                                   >> 3239 oom_free_page:
                                                   >> 3240         put_page(page);
4846 oom:                                             3241 oom:
4847         return VM_FAULT_OOM;                     3242         return VM_FAULT_OOM;
4848 }                                                3243 }
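
The two branches of do_anonymous_page() above (shared zero page for reads, freshly allocated folio for writes) can be observed from user space through fault counters. This is a hedged sketch using only standard mmap()/getrusage(); the exact counts vary with kernel version and THP/mTHP settings.

/* User-space sketch: read an anonymous mapping first, then write it. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/resource.h>

static long minor_faults(void)
{
	struct rusage ru;

	getrusage(RUSAGE_SELF, &ru);
	return ru.ru_minflt;
}

int main(void)
{
	size_t len = 16UL << 20;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	long base = minor_faults();
	long sink = 0;
	for (size_t i = 0; i < len; i += 4096)
		sink += p[i];			/* read faults -> zero page mappings */
	long after_read = minor_faults();

	for (size_t i = 0; i < len; i += 4096)
		p[i] = 1;			/* write faults -> private anonymous pages */
	long after_write = minor_faults();

	printf("read-side faults: %ld, write-side faults: %ld (sink=%ld)\n",
	       after_read - base, after_write - after_read, sink);
	return 0;
}

The read pass costs faults but almost no memory (RSS stays low because every PTE points at the shared zero page); the write pass is what actually allocates and charges folios.
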
4849                                                  3244 
4850 /*                                               3245 /*
4851  * The mmap_lock must have been held on entry !! 3246  * The mmap_sem must have been held on entry, and may have been
4852  * released depending on flags and vma->vm_op    3247  * released depending on flags and vma->vm_ops->fault() return value.
4853  * See filemap_fault() and __lock_page_retry(    3248  * See filemap_fault() and __lock_page_retry().
4854  */                                              3249  */
4855 static vm_fault_t __do_fault(struct vm_fault  !! 3250 static int __do_fault(struct vm_fault *vmf)
4856 {                                                3251 {
4857         struct vm_area_struct *vma = vmf->vma    3252         struct vm_area_struct *vma = vmf->vma;
4858         struct folio *folio;                  !! 3253         int ret;
4859         vm_fault_t ret;                       << 
4860                                               << 
4861         /*                                    << 
4862          * Preallocate pte before we take page_lock because this might lead to   << 
4863          * deadlocks for memcg reclaim which waits for pages under writeback:     << 
4864          *                              lock_page(A)                              << 
4865          *                              SetPageWriteback(A)                       << 
4866          *                              unlock_page(A)                            << 
4867          * lock_page(B)                                                           << 
4868          *                              lock_page(B)                              << 
4869          * pte_alloc_one                                                          << 
4870          *   shrink_folio_list                                                    << 
4871          *     wait_on_page_writeback(A)                                          << 
4872          *                              SetPageWriteback(B)                       << 
4873          *                              unlock_page(B)                            << 
4874          *                              # flush A, B to clear the writeback       << 
4875          */                                                                       << 
4876         if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {                          << 
4877                 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);                    << 
4878                 if (!vmf->prealloc_pte)       << 
4879                         return VM_FAULT_OOM;  << 
4880         }                                     << 
4881                                                  3254 
4882         ret = vma->vm_ops->fault(vmf);           3255         ret = vma->vm_ops->fault(vmf);
4883         if (unlikely(ret & (VM_FAULT_ERROR |     3256         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
4884                             VM_FAULT_DONE_COW    3257                             VM_FAULT_DONE_COW)))
4885                 return ret;                      3258                 return ret;
4886                                                  3259 
4887         folio = page_folio(vmf->page);        << 
4888         if (unlikely(PageHWPoison(vmf->page))    3260         if (unlikely(PageHWPoison(vmf->page))) {
4889                 vm_fault_t poisonret = VM_FAULT_HWPOISON;                          !! 3261                 if (ret & VM_FAULT_LOCKED)
4890                 if (ret & VM_FAULT_LOCKED) {                                       !! 3262                         unlock_page(vmf->page);
4891                         if (page_mapped(vmf->page))                                !! 3263                 put_page(vmf->page);
4892                                 unmap_mapping_folio(folio);                        << 
4893                         /* Retry if a clean folio was removed from the cache. */   << 
4894                         if (mapping_evict_folio(folio->mapping, folio))            << 
4895                                 poisonret = VM_FAULT_NOPAGE;                       << 
4896                         folio_unlock(folio);  << 
4897                 }                             << 
4898                 folio_put(folio);             << 
4899                 vmf->page = NULL;                3264                 vmf->page = NULL;
4900                 return poisonret;             !! 3265                 return VM_FAULT_HWPOISON;
4901         }                                        3266         }
4902                                                  3267 
4903         if (unlikely(!(ret & VM_FAULT_LOCKED)    3268         if (unlikely(!(ret & VM_FAULT_LOCKED)))
4904                 folio_lock(folio);            !! 3269                 lock_page(vmf->page);
4905         else                                     3270         else
4906                 VM_BUG_ON_PAGE(!folio_test_lo !! 3271                 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
4907                                                  3272 
4908         return ret;                              3273         return ret;
4909 }                                                3274 }
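
__do_fault() above simply calls vma->vm_ops->fault() and expects a referenced (and eventually locked) page back in vmf->page. The following is an illustrative sketch, not taken from memory.c, of what such a handler looks like on the driver side; all demo_* names are made up and only the vm_fault / vm_operations_struct API itself is real.

#include <linux/fs.h>
#include <linux/mm.h>

static struct page *demo_backing_page;		/* assume allocated at init, e.g. alloc_page() */

static vm_fault_t demo_vm_fault(struct vm_fault *vmf)
{
	if (vmf->pgoff != 0)			/* only a single page is backed here */
		return VM_FAULT_SIGBUS;

	get_page(demo_backing_page);		/* the fault code expects a referenced page */
	vmf->page = demo_backing_page;		/* __do_fault() locks it unless VM_FAULT_LOCKED */
	return 0;
}

static const struct vm_operations_struct demo_vm_ops = {
	.fault = demo_vm_fault,
};

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &demo_vm_ops;		/* wire the handler into the VMA at mmap time */
	return 0;
}
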
4910                                                  3275 
4911 #ifdef CONFIG_TRANSPARENT_HUGEPAGE            !! 3276 /*
                                                   >> 3277  * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
                                                   >> 3278  * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
                                                   >> 3279  * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
                                                   >> 3280  * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
                                                   >> 3281  */
                                                   >> 3282 static int pmd_devmap_trans_unstable(pmd_t *pmd)
                                                   >> 3283 {
                                                   >> 3284         return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
                                                   >> 3285 }
                                                   >> 3286 
                                                   >> 3287 static int pte_alloc_one_map(struct vm_fault *vmf)
                                                   >> 3288 {
                                                   >> 3289         struct vm_area_struct *vma = vmf->vma;
                                                   >> 3290 
                                                   >> 3291         if (!pmd_none(*vmf->pmd))
                                                   >> 3292                 goto map_pte;
                                                   >> 3293         if (vmf->prealloc_pte) {
                                                   >> 3294                 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
                                                   >> 3295                 if (unlikely(!pmd_none(*vmf->pmd))) {
                                                   >> 3296                         spin_unlock(vmf->ptl);
                                                   >> 3297                         goto map_pte;
                                                   >> 3298                 }
                                                   >> 3299 
                                                   >> 3300                 mm_inc_nr_ptes(vma->vm_mm);
                                                   >> 3301                 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
                                                   >> 3302                 spin_unlock(vmf->ptl);
                                                   >> 3303                 vmf->prealloc_pte = NULL;
                                                   >> 3304         } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) {
                                                   >> 3305                 return VM_FAULT_OOM;
                                                   >> 3306         }
                                                   >> 3307 map_pte:
                                                   >> 3308         /*
                                                   >> 3309          * If a huge pmd materialized under us just retry later.  Use
                                                   >> 3310          * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
                                                   >> 3311          * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
                                                   >> 3312          * under us and then back to pmd_none, as a result of MADV_DONTNEED
                                                   >> 3313          * running immediately after a huge pmd fault in a different thread of
                                                   >> 3314          * this mm, in turn leading to a misleading pmd_trans_huge() retval.
                                                   >> 3315          * All we have to ensure is that it is a regular pmd that we can walk
                                                   >> 3316          * with pte_offset_map() and we can do that through an atomic read in
                                                   >> 3317          * C, which is what pmd_trans_unstable() provides.
                                                   >> 3318          */
                                                   >> 3319         if (pmd_devmap_trans_unstable(vmf->pmd))
                                                   >> 3320                 return VM_FAULT_NOPAGE;
                                                   >> 3321 
                                                   >> 3322         /*
                                                   >> 3323          * At this point we know that our vmf->pmd points to a page of ptes
                                                   >> 3324          * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
                                                   >> 3325          * for the duration of the fault.  If a racing MADV_DONTNEED runs and
                                                   >> 3326          * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
                                                   >> 3327          * be valid and we will re-check to make sure the vmf->pte isn't
                                                   >> 3328          * pte_none() under vmf->ptl protection when we return to
                                                   >> 3329          * alloc_set_pte().
                                                   >> 3330          */
                                                   >> 3331         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
                                                   >> 3332                         &vmf->ptl);
                                                   >> 3333         return 0;
                                                   >> 3334 }
                                                   >> 3335 
                                                   >> 3336 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
                                                   >> 3337 
                                                   >> 3338 #define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
                                                   >> 3339 static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
                                                   >> 3340                 unsigned long haddr)
                                                   >> 3341 {
                                                   >> 3342         if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
                                                   >> 3343                         (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
                                                   >> 3344                 return false;
                                                   >> 3345         if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
                                                   >> 3346                 return false;
                                                   >> 3347         return true;
                                                   >> 3348 }
                                                   >> 3349 
4912 static void deposit_prealloc_pte(struct vm_fa    3350 static void deposit_prealloc_pte(struct vm_fault *vmf)
4913 {                                                3351 {
4914         struct vm_area_struct *vma = vmf->vma    3352         struct vm_area_struct *vma = vmf->vma;
4915                                                  3353 
4916         pgtable_trans_huge_deposit(vma->vm_mm    3354         pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4917         /*                                       3355         /*
4918          * We are going to consume the preall    3356          * We are going to consume the prealloc table,
4919          * count that as nr_ptes.                3357          * count that as nr_ptes.
4920          */                                      3358          */
4921         mm_inc_nr_ptes(vma->vm_mm);              3359         mm_inc_nr_ptes(vma->vm_mm);
4922         vmf->prealloc_pte = NULL;                3360         vmf->prealloc_pte = NULL;
4923 }                                                3361 }
4924                                                  3362 
4925 vm_fault_t do_set_pmd(struct vm_fault *vmf, s !! 3363 static int do_set_pmd(struct vm_fault *vmf, struct page *page)
4926 {                                                3364 {
4927         struct folio *folio = page_folio(page);                                   << 
4928         struct vm_area_struct *vma = vmf->vma    3365         struct vm_area_struct *vma = vmf->vma;
4929         bool write = vmf->flags & FAULT_FLAG_    3366         bool write = vmf->flags & FAULT_FLAG_WRITE;
4930         unsigned long haddr = vmf->address &     3367         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
4931         pmd_t entry;                             3368         pmd_t entry;
4932         vm_fault_t ret = VM_FAULT_FALLBACK;   !! 3369         int i, ret;
4933                                                  3370 
4934         /*                                                                        << 
4935          * It is too late to allocate a small folio, we already have a large      << 
4936          * folio in the pagecache: especially s390 KVM cannot tolerate any        << 
4937          * PMD mappings, but PTE-mapped THP are fine. So let's simply refuse any  << 
4938          * PMD mappings if THPs are disabled.                                     << 
4939          */                                                                       << 
4940         if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags))         << 
4941                 return ret;                                                       << 
4942                                                  3373 
4943         if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))                       !! 3374         ret = VM_FAULT_FALLBACK;
4944                 return ret;                                                       !! 3375         page = compound_head(page);
4945                                                                                   << 
4946         if (folio_order(folio) != HPAGE_PMD_ORDER)                                << 
4947                 return ret;                                                       << 
4948         page = &folio->page;                                                      << 
4949                                                  3376 
4950         /*                                       3377         /*
4951          * Just backoff if any subpage of a THP is corrupted otherwise            !! 3378          * Archs like ppc64 need additonal space to store information
4952          * the corrupted page may mapped by PMD silently to escape the            << 
4953          * check.  This kind of THP just can be PTE mapped.  Access to            << 
4954          * the corrupted subpage should trigger SIGBUS as expected.               << 
4955          */                                                                       << 
4956         if (unlikely(folio_test_has_hwpoisoned(folio)))                           << 
4957                 return ret;                                                       << 
4958                                                                                   << 
4959         /*                                                                        << 
4960          * Archs like ppc64 need additional space to store information            << 
4961          * related to pte entry. Use the prea    3379          * related to pte entry. Use the preallocated table for that.
4962          */                                      3380          */
4963         if (arch_needs_pgtable_deposit() && !    3381         if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
4964                 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);                    !! 3382                 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address);
4965                 if (!vmf->prealloc_pte)          3383                 if (!vmf->prealloc_pte)
4966                         return VM_FAULT_OOM;     3384                         return VM_FAULT_OOM;
                                                   >> 3385                 smp_wmb(); /* See comment in __pte_alloc() */
4967         }                                        3386         }
4968                                                  3387 
4969         vmf->ptl = pmd_lock(vma->vm_mm, vmf->    3388         vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4970         if (unlikely(!pmd_none(*vmf->pmd)))      3389         if (unlikely(!pmd_none(*vmf->pmd)))
4971                 goto out;                        3390                 goto out;
4972                                                  3391 
4973         flush_icache_pages(vma, page, HPAGE_PMD_NR);                              !! 3392         for (i = 0; i < HPAGE_PMD_NR; i++)
                                                   >> 3393                 flush_icache_page(vma, page + i);
4974                                                  3394 
4975         entry = mk_huge_pmd(page, vma->vm_pag    3395         entry = mk_huge_pmd(page, vma->vm_page_prot);
4976         if (write)                               3396         if (write)
4977                 entry = maybe_pmd_mkwrite(pmd    3397                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
4978                                                  3398 
4979         add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR);         !! 3399         add_mm_counter(vma->vm_mm, MM_FILEPAGES, HPAGE_PMD_NR);
4980         folio_add_file_rmap_pmd(folio, page, vma);                                !! 3400         page_add_file_rmap(page, true);
4981                                               << 
4982         /*                                       3401         /*
4983          * deposit and withdraw with pmd lock    3402          * deposit and withdraw with pmd lock held
4984          */                                      3403          */
4985         if (arch_needs_pgtable_deposit())        3404         if (arch_needs_pgtable_deposit())
4986                 deposit_prealloc_pte(vmf);       3405                 deposit_prealloc_pte(vmf);
4987                                                  3406 
4988         set_pmd_at(vma->vm_mm, haddr, vmf->pm    3407         set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
4989                                                  3408 
4990         update_mmu_cache_pmd(vma, haddr, vmf-    3409         update_mmu_cache_pmd(vma, haddr, vmf->pmd);
4991                                                  3410 
4992         /* fault is handled */                   3411         /* fault is handled */
4993         ret = 0;                                 3412         ret = 0;
4994         count_vm_event(THP_FILE_MAPPED);         3413         count_vm_event(THP_FILE_MAPPED);
4995 out:                                             3414 out:
4996         spin_unlock(vmf->ptl);                   3415         spin_unlock(vmf->ptl);
4997         return ret;                              3416         return ret;
4998 }                                                3417 }
4999 #else                                            3418 #else
5000 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)                   !! 3419 static int do_set_pmd(struct vm_fault *vmf, struct page *page)
5001 {                                                3420 {
5002         return VM_FAULT_FALLBACK;             !! 3421         BUILD_BUG();
                                                   >> 3422         return 0;
5003 }                                                3423 }
5004 #endif                                           3424 #endif
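
Whether do_set_pmd() can install a huge mapping at all depends on the faulting address, the VMA bounds, and the file offset being in phase at PMD granularity. Below is a standalone sketch of that suitability test with illustrative constants (2 MiB PMD, 4 KiB pages); it mirrors the idea of the kernel's old transhuge_vma_suitable()/thp_vma_suitable_order() checks, it is not the kernel helper itself.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define HPAGE_SIZE	(512 * PAGE_SIZE)	/* 2 MiB on x86-64 */

static bool pmd_mapping_suitable(unsigned long vm_start, unsigned long vm_end,
				 unsigned long vm_pgoff, unsigned long addr)
{
	unsigned long haddr = addr & ~(HPAGE_SIZE - 1);	/* round down to 2 MiB */

	/* virtual address and file offset must share the same sub-PMD phase */
	if (((vm_start / PAGE_SIZE) & 511) != (vm_pgoff & 511))
		return false;
	/* the whole huge page must lie inside the VMA */
	return haddr >= vm_start && haddr + HPAGE_SIZE <= vm_end;
}

int main(void)
{
	unsigned long start = 0x7f0000200000UL;	/* 2 MiB aligned VMA start */

	printf("%d\n", pmd_mapping_suitable(start, start + 8 * HPAGE_SIZE,
					    0 /* pgoff */,
					    start + HPAGE_SIZE + 12345));
	return 0;
}

If either condition fails, the fault falls back to PTE mappings, which is exactly the VM_FAULT_FALLBACK path in do_set_pmd().
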
5005                                                  3425 
5006 /**                                              3426 /**
5007  * set_pte_range - Set a range of PTEs to point to pages in a folio.        !! 3427  * alloc_set_pte - setup new PTE entry for given page and add reverse page
5008  * @vmf: Fault description.                                                 !! 3428  * mapping. If needed, the fucntion allocates page table or use pre-allocated.
5009  * @folio: The folio that contains @page.                                   !! 3429  *
5010  * @page: The first page to create a PTE for.                               !! 3430  * @vmf: fault environment
5011  * @nr: The number of PTEs to create.                                       !! 3431  * @memcg: memcg to charge page (only for private mappings)
5012  * @addr: The first address to create a PTE for.                            !! 3432  * @page: page to map
                                                   >> 3433  *
                                                   >> 3434  * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on
                                                   >> 3435  * return.
                                                   >> 3436  *
                                                   >> 3437  * Target users are page handler itself and implementations of
                                                   >> 3438  * vm_ops->map_pages.
5013  */                                              3439  */
5014 void set_pte_range(struct vm_fault *vmf, struct folio *folio,               !! 3440 int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
5015                 struct page *page, unsigned int nr, unsigned long addr)    !! 3441                 struct page *page)
5016 {                                                3442 {
5017         struct vm_area_struct *vma = vmf->vma    3443         struct vm_area_struct *vma = vmf->vma;
5018         bool write = vmf->flags & FAULT_FLAG_    3444         bool write = vmf->flags & FAULT_FLAG_WRITE;
5019         bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);      << 
5020         pte_t entry;                             3445         pte_t entry;
                                                   >> 3446         int ret;
5021                                                  3447 
5022         flush_icache_pages(vma, page, nr);    !! 3448         if (pmd_none(*vmf->pmd) && PageTransCompound(page) &&
5023         entry = mk_pte(page, vma->vm_page_prot);                            !! 3449                         IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
                                                   >> 3450                 /* THP on COW? */
                                                   >> 3451                 VM_BUG_ON_PAGE(memcg, page);
5024                                                  3452 
5025         if (prefault && arch_wants_old_prefaulted_pte())                    !! 3453                 ret = do_set_pmd(vmf, page);
5026                 entry = pte_mkold(entry);     !! 3454                 if (ret != VM_FAULT_FALLBACK)
5027         else                                  !! 3455                         return ret;
5028                 entry = pte_sw_mkyoung(entry) !! 3456         }
5029                                                  3457 
                                                   >> 3458         if (!vmf->pte) {
                                                   >> 3459                 ret = pte_alloc_one_map(vmf);
                                                   >> 3460                 if (ret)
                                                   >> 3461                         return ret;
                                                   >> 3462         }
                                                   >> 3463 
                                                   >> 3464         /* Re-check under ptl */
                                                   >> 3465         if (unlikely(!pte_none(*vmf->pte)))
                                                   >> 3466                 return VM_FAULT_NOPAGE;
                                                   >> 3467 
                                                   >> 3468         flush_icache_page(vma, page);
                                                   >> 3469         entry = mk_pte(page, vma->vm_page_prot);
5030         if (write)                               3470         if (write)
5031                 entry = maybe_mkwrite(pte_mkd    3471                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
5032         if (unlikely(vmf_orig_pte_uffd_wp(vmf << 
5033                 entry = pte_mkuffd_wp(entry); << 
5034         /* copy-on-write page */                 3472         /* copy-on-write page */
5035         if (write && !(vma->vm_flags & VM_SHA    3473         if (write && !(vma->vm_flags & VM_SHARED)) {
5036                 VM_BUG_ON_FOLIO(nr != 1, foli !! 3474                 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
5037                 folio_add_new_anon_rmap(folio !! 3475                 page_add_new_anon_rmap(page, vma, vmf->address, false);
5038                 folio_add_lru_vma(folio, vma) !! 3476                 mem_cgroup_commit_charge(page, memcg, false, false);
                                                   >> 3477                 lru_cache_add_active_or_unevictable(page, vma);
5039         } else {                                 3478         } else {
5040                 folio_add_file_rmap_ptes(foli !! 3479                 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
                                                   >> 3480                 page_add_file_rmap(page, false);
5041         }                                        3481         }
5042         set_ptes(vma->vm_mm, addr, vmf->pte,  !! 3482         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
5043                                                  3483 
5044         /* no need to invalidate: a not-prese    3484         /* no need to invalidate: a not-present page won't be cached */
5045         update_mmu_cache_range(vmf, vma, addr !! 3485         update_mmu_cache(vma, vmf->address, vmf->pte);
5046 }                                             << 
5047                                                  3486 
5048 static bool vmf_pte_changed(struct vm_fault * !! 3487         return 0;
5049 {                                             << 
5050         if (vmf->flags & FAULT_FLAG_ORIG_PTE_ << 
5051                 return !pte_same(ptep_get(vmf << 
5052                                               << 
5053         return !pte_none(ptep_get(vmf->pte)); << 
5054 }                                                3488 }
5055                                                  3489 
                                                   >> 3490 
5056 /**                                              3491 /**
5057  * finish_fault - finish page fault once we h    3492  * finish_fault - finish page fault once we have prepared the page to fault
5058  *                                               3493  *
5059  * @vmf: structure describing the fault          3494  * @vmf: structure describing the fault
5060  *                                               3495  *
5061  * This function handles all that is needed t    3496  * This function handles all that is needed to finish a page fault once the
5062  * page to fault in is prepared. It handles l    3497  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
5063  * given page, adds reverse page mapping, han    3498  * given page, adds reverse page mapping, handles memcg charges and LRU
5064  * addition.                                  !! 3499  * addition. The function returns 0 on success, VM_FAULT_ code in case of
                                                   >> 3500  * error.
5065  *                                               3501  *
5066  * The function expects the page to be locked    3502  * The function expects the page to be locked and on success it consumes a
5067  * reference of a page being mapped (for the     3503  * reference of a page being mapped (for the PTE which maps it).
5068  *                                            << 
5069  * Return: %0 on success, %VM_FAULT_ code in  << 
5070  */                                              3504  */
5071 vm_fault_t finish_fault(struct vm_fault *vmf) !! 3505 int finish_fault(struct vm_fault *vmf)
5072 {                                                3506 {
5073         struct vm_area_struct *vma = vmf->vma << 
5074         struct page *page;                       3507         struct page *page;
5075         struct folio *folio;                  !! 3508         int ret = 0;
5076         vm_fault_t ret;                       << 
5077         bool is_cow = (vmf->flags & FAULT_FLA << 
5078                       !(vma->vm_flags & VM_SH << 
5079         int type, nr_pages;                   << 
5080         unsigned long addr = vmf->address;    << 
5081                                                  3509 
5082         /* Did we COW the page? */               3510         /* Did we COW the page? */
5083         if (is_cow)                           !! 3511         if ((vmf->flags & FAULT_FLAG_WRITE) &&
                                                   >> 3512             !(vmf->vma->vm_flags & VM_SHARED))
5084                 page = vmf->cow_page;            3513                 page = vmf->cow_page;
5085         else                                     3514         else
5086                 page = vmf->page;                3515                 page = vmf->page;
5087                                                  3516 
5088         /*                                       3517         /*
5089          * check even for read faults because    3518          * check even for read faults because we might have lost our CoWed
5090          * page                                  3519          * page
5091          */                                      3520          */
5092         if (!(vma->vm_flags & VM_SHARED)) {   !! 3521         if (!(vmf->vma->vm_flags & VM_SHARED))
5093                 ret = check_stable_address_sp !! 3522                 ret = check_stable_address_space(vmf->vma->vm_mm);
5094                 if (ret)                      !! 3523         if (!ret)
5095                         return ret;           !! 3524                 ret = alloc_set_pte(vmf, vmf->memcg, page);
5096         }                                     !! 3525         if (vmf->pte)
5097                                               !! 3526                 pte_unmap_unlock(vmf->pte, vmf->ptl);
5098         if (pmd_none(*vmf->pmd)) {            << 
5099                 if (PageTransCompound(page))  << 
5100                         ret = do_set_pmd(vmf, << 
5101                         if (ret != VM_FAULT_F << 
5102                                 return ret;   << 
5103                 }                             << 
5104                                               << 
5105                 if (vmf->prealloc_pte)        << 
5106                         pmd_install(vma->vm_m << 
5107                 else if (unlikely(pte_alloc(v << 
5108                         return VM_FAULT_OOM;  << 
5109         }                                     << 
5110                                               << 
5111         folio = page_folio(page);             << 
5112         nr_pages = folio_nr_pages(folio);     << 
5113                                               << 
5114         /*                                    << 
5115          * Using per-page fault to maintain t << 
5116          * approach also applies to non-anony << 
5117          * inflating the RSS of the process.  << 
5118          */                                   << 
5119         if (!vma_is_anon_shmem(vma) || unlike << 
5120                 nr_pages = 1;                 << 
5121         } else if (nr_pages > 1) {            << 
5122                 pgoff_t idx = folio_page_idx( << 
5123                 /* The page offset of vmf->ad << 
5124                 pgoff_t vma_off = vmf->pgoff  << 
5125                 /* The index of the entry in  << 
5126                 pgoff_t pte_off = pte_index(v << 
5127                                               << 
5128                 /*                            << 
5129                  * Fallback to per-page fault << 
5130                  * cache beyond the VMA limit << 
5131                  */                           << 
5132                 if (unlikely(vma_off < idx || << 
5133                             vma_off + (nr_pag << 
5134                             pte_off < idx ||  << 
5135                             pte_off + (nr_pag << 
5136                         nr_pages = 1;         << 
5137                 } else {                      << 
5138                         /* Now we can set map << 
5139                         addr = vmf->address - << 
5140                         page = &folio->page;  << 
5141                 }                             << 
5142         }                                     << 
5143                                               << 
5144         vmf->pte = pte_offset_map_lock(vma->v << 
5145                                        addr,  << 
5146         if (!vmf->pte)                        << 
5147                 return VM_FAULT_NOPAGE;       << 
5148                                               << 
5149         /* Re-check under ptl */              << 
5150         if (nr_pages == 1 && unlikely(vmf_pte << 
5151                 update_mmu_tlb(vma, addr, vmf << 
5152                 ret = VM_FAULT_NOPAGE;        << 
5153                 goto unlock;                  << 
5154         } else if (nr_pages > 1 && !pte_range << 
5155                 update_mmu_tlb_range(vma, add << 
5156                 ret = VM_FAULT_NOPAGE;        << 
5157                 goto unlock;                  << 
5158         }                                     << 
5159                                               << 
5160         folio_ref_add(folio, nr_pages - 1);   << 
5161         set_pte_range(vmf, folio, page, nr_pa << 
5162         type = is_cow ? MM_ANONPAGES : mm_cou << 
5163         add_mm_counter(vma->vm_mm, type, nr_p << 
5164         ret = 0;                              << 
5165                                               << 
5166 unlock:                                       << 
5167         pte_unmap_unlock(vmf->pte, vmf->ptl); << 
5168         return ret;                              3527         return ret;
5169 }                                                3528 }
5170                                                  3529 
5171 static unsigned long fault_around_pages __rea !! 3530 static unsigned long fault_around_bytes __read_mostly =
5172         65536 >> PAGE_SHIFT;                  !! 3531         rounddown_pow_of_two(65536);
5173                                                  3532 
5174 #ifdef CONFIG_DEBUG_FS                           3533 #ifdef CONFIG_DEBUG_FS
5175 static int fault_around_bytes_get(void *data,    3534 static int fault_around_bytes_get(void *data, u64 *val)
5176 {                                                3535 {
5177         *val = fault_around_pages << PAGE_SHI !! 3536         *val = fault_around_bytes;
5178         return 0;                                3537         return 0;
5179 }                                                3538 }
5180                                                  3539 
5181 /*                                               3540 /*
5182  * fault_around_bytes must be rounded down to    3541  * fault_around_bytes must be rounded down to the nearest page order as it's
5183  * what do_fault_around() expects to see.        3542  * what do_fault_around() expects to see.
5184  */                                              3543  */
5185 static int fault_around_bytes_set(void *data,    3544 static int fault_around_bytes_set(void *data, u64 val)
5186 {                                                3545 {
5187         if (val / PAGE_SIZE > PTRS_PER_PTE)      3546         if (val / PAGE_SIZE > PTRS_PER_PTE)
5188                 return -EINVAL;                  3547                 return -EINVAL;
5189                                               !! 3548         if (val > PAGE_SIZE)
5190         /*                                    !! 3549                 fault_around_bytes = rounddown_pow_of_two(val);
5191          * The minimum value is 1 page, howev !! 3550         else
5192          * at all. See should_fault_around(). !! 3551                 fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
5193          */                                   << 
5194         val = max(val, PAGE_SIZE);            << 
5195         fault_around_pages = rounddown_pow_of << 
5196                                               << 
5197         return 0;                                3552         return 0;
5198 }                                                3553 }
5199 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_f    3554 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
5200                 fault_around_bytes_get, fault    3555                 fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
5201                                                  3556 
5202 static int __init fault_around_debugfs(void)     3557 static int __init fault_around_debugfs(void)
5203 {                                                3558 {
5204         debugfs_create_file_unsafe("fault_aro !! 3559         void *ret;
5205                                    &fault_aro !! 3560 
                                                   >> 3561         ret = debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
                                                   >> 3562                         &fault_around_bytes_fops);
                                                   >> 3563         if (!ret)
                                                   >> 3564                 pr_warn("Failed to create fault_around_bytes in debugfs");
5206         return 0;                                3565         return 0;
5207 }                                                3566 }
5208 late_initcall(fault_around_debugfs);             3567 late_initcall(fault_around_debugfs);
5209 #endif                                           3568 #endif
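For reference, fault_around_bytes is exposed through debugfs (typically /sys/kernel/debug/fault_around_bytes when debugfs is mounted): the setter above rejects anything larger than one page table's worth of pages, clamps the minimum to a single page and rounds the value down to a power of two. The userspace sketch below mirrors that clamping; the 4 KiB page size, the 512-entry page table and the helper names are assumptions for illustration, not values taken from this file.

#include <stdio.h>

#define SKETCH_PAGE_SIZE        4096UL
#define SKETCH_PTRS_PER_PTE     512UL

/* rounddown_pow_of_two() equivalent: largest power of two <= val (val > 0) */
static unsigned long rounddown_pow2(unsigned long val)
{
        unsigned long p = 1;

        while (p * 2 <= val)
                p *= 2;
        return p;
}

/* What a read of the knob would report after writing 'val' bytes to it */
static long fault_around_bytes_after_write(unsigned long val)
{
        unsigned long pages;

        if (val / SKETCH_PAGE_SIZE > SKETCH_PTRS_PER_PTE)
                return -1;                      /* the kernel returns -EINVAL */
        if (val < SKETCH_PAGE_SIZE)
                val = SKETCH_PAGE_SIZE;         /* minimum is a single page */
        pages = rounddown_pow2(val / SKETCH_PAGE_SIZE);
        return (long)(pages * SKETCH_PAGE_SIZE);
}

int main(void)
{
        unsigned long tests[] = { 0, 1, 4096, 65536, 100000, 3000000 };

        for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
                printf("write %8lu -> read back %ld\n",
                       tests[i], fault_around_bytes_after_write(tests[i]));
        return 0;
}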
5210                                                  3569 
5211 /*                                               3570 /*
5212  * do_fault_around() tries to map a few pages    3571  * do_fault_around() tries to map a few pages around the fault address. The hope
5213  * is that the pages will be needed soon and     3572  * is that the pages will be needed soon and this will lower the number of
5214  * faults to handle.                             3573  * faults to handle.
5215  *                                               3574  *
5216  * It uses vm_ops->map_pages() to map the pag    3575  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
5217  * not ready to be mapped: not up-to-date, lo    3576  * not ready to be mapped: not up-to-date, locked, etc.
5218  *                                               3577  *
5219  * This function doesn't cross VMA or page ta !! 3578  * This function is called with the page table lock taken. In the split ptlock
5220  * map_pages() and acquire a PTE lock only on !! 3579  * case the page table lock protects only those entries which belong to
                                                   >> 3580  * the page table corresponding to the fault address.
5221  *                                               3581  *
5222  * fault_around_pages defines how many pages  !! 3582  * This function doesn't cross the VMA boundaries, in order to call map_pages()
                                                   >> 3583  * only once.
                                                   >> 3584  *
                                                   >> 3585  * fault_around_bytes defines how many bytes we'll try to map.
5223  * do_fault_around() expects it to be set to     3586  * do_fault_around() expects it to be set to a power of two less than or equal
5224  * to PTRS_PER_PTE.                              3587  * to PTRS_PER_PTE.
5225  *                                               3588  *
5226  * The virtual address of the area that we ma    3589  * The virtual address of the area that we map is naturally aligned to
5227  * fault_around_pages * PAGE_SIZE rounded dow !! 3590  * fault_around_bytes rounded down to the machine page size
5228  * (and therefore to page order).  This way i    3591  * (and therefore to page order).  This way it's easier to guarantee
5229  * that we don't cross page table boundaries.    3592  * that we don't cross page table boundaries.
5230  */                                              3593  */
5231 static vm_fault_t do_fault_around(struct vm_f !! 3594 static int do_fault_around(struct vm_fault *vmf)
5232 {                                                3595 {
5233         pgoff_t nr_pages = READ_ONCE(fault_ar !! 3596         unsigned long address = vmf->address, nr_pages, mask;
5234         pgoff_t pte_off = pte_index(vmf->addr !! 3597         pgoff_t start_pgoff = vmf->pgoff;
5235         /* The page offset of vmf->address wi !! 3598         pgoff_t end_pgoff;
5236         pgoff_t vma_off = vmf->pgoff - vmf->v !! 3599         int off, ret = 0;
5237         pgoff_t from_pte, to_pte;             !! 3600 
5238         vm_fault_t ret;                       !! 3601         nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
5239                                               !! 3602         mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
5240         /* The PTE offset of the start addres !! 3603 
5241         from_pte = max(ALIGN_DOWN(pte_off, nr !! 3604         vmf->address = max(address & mask, vmf->vma->vm_start);
5242                        pte_off - min(pte_off, !! 3605         off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
5243                                               !! 3606         start_pgoff -= off;
5244         /* The PTE offset of the end address, !! 3607 
5245         to_pte = min3(from_pte + nr_pages, (p !! 3608         /*
5246                       pte_off + vma_pages(vmf !! 3609          *  end_pgoff is either the end of the page table, the end of
                                                   >> 3610          *  the vma or nr_pages from start_pgoff, depending what is nearest.
                                                   >> 3611          */
                                                   >> 3612         end_pgoff = start_pgoff -
                                                   >> 3613                 ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
                                                   >> 3614                 PTRS_PER_PTE - 1;
                                                   >> 3615         end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
                                                   >> 3616                         start_pgoff + nr_pages - 1);
5247                                                  3617 
5248         if (pmd_none(*vmf->pmd)) {               3618         if (pmd_none(*vmf->pmd)) {
5249                 vmf->prealloc_pte = pte_alloc !! 3619                 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm,
                                                   >> 3620                                                   vmf->address);
5250                 if (!vmf->prealloc_pte)          3621                 if (!vmf->prealloc_pte)
5251                         return VM_FAULT_OOM;  !! 3622                         goto out;
                                                   >> 3623                 smp_wmb(); /* See comment in __pte_alloc() */
5252         }                                        3624         }
5253                                                  3625 
5254         rcu_read_lock();                      !! 3626         vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
5255         ret = vmf->vma->vm_ops->map_pages(vmf << 
5256                         vmf->pgoff + from_pte << 
5257                         vmf->pgoff + to_pte - << 
5258         rcu_read_unlock();                    << 
5259                                                  3627 
5260         return ret;                           !! 3628         /* Huge page is mapped? Page fault is solved */
5261 }                                             !! 3629         if (pmd_trans_huge(*vmf->pmd)) {
5262                                               !! 3630                 ret = VM_FAULT_NOPAGE;
5263 /* Return true if we should do read fault-aro !! 3631                 goto out;
5264 static inline bool should_fault_around(struct !! 3632         }
5265 {                                             << 
5266         /* No ->map_pages?  No way to fault a << 
5267         if (!vmf->vma->vm_ops->map_pages)     << 
5268                 return false;                 << 
5269                                                  3633 
5270         if (uffd_disable_fault_around(vmf->vm !! 3634         /* ->map_pages() hasn't done anything useful. Cold page cache? */
5271                 return false;                 !! 3635         if (!vmf->pte)
                                                   >> 3636                 goto out;
5272                                                  3637 
5273         /* A single page implies no faulting  !! 3638         /* check if the page fault is solved */
5274         return fault_around_pages > 1;        !! 3639         vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
                                                   >> 3640         if (!pte_none(*vmf->pte))
                                                   >> 3641                 ret = VM_FAULT_NOPAGE;
                                                   >> 3642         pte_unmap_unlock(vmf->pte, vmf->ptl);
                                                   >> 3643 out:
                                                   >> 3644         vmf->address = address;
                                                   >> 3645         vmf->pte = NULL;
                                                   >> 3646         return ret;
5275 }                                                3647 }
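The window selection described above can be pictured with plain address arithmetic: align the fault address down to the window size, then clip the result to the VMA and to the page table containing the address so that map_pages() is called exactly once and never crosses a page table boundary. The sketch below is a userspace illustration of that clipping under assumed 4 KiB pages and 512-entry page tables; the struct and helper names are invented for the example and are not the kernel's internal expressions.

#include <stdio.h>

#define SKETCH_PAGE_SIZE        4096UL
#define SKETCH_PTRS_PER_PTE     512UL           /* one PTE page covers 2 MiB */

struct fa_window { unsigned long start, end; };

static struct fa_window fault_around_window(unsigned long addr,
                                            unsigned long vma_start,
                                            unsigned long vma_end,
                                            unsigned long nr_pages)
{
        unsigned long win = nr_pages * SKETCH_PAGE_SIZE;
        unsigned long pt_start = addr & ~(SKETCH_PTRS_PER_PTE * SKETCH_PAGE_SIZE - 1);
        unsigned long pt_end = pt_start + SKETCH_PTRS_PER_PTE * SKETCH_PAGE_SIZE;
        struct fa_window w;

        /* Naturally align the window to its own size... */
        w.start = addr & ~(win - 1);
        w.end = w.start + win;

        /* ...then clip it to the VMA and to the containing page table. */
        if (w.start < vma_start) w.start = vma_start;
        if (w.start < pt_start)  w.start = pt_start;
        if (w.end > vma_end)     w.end = vma_end;
        if (w.end > pt_end)      w.end = pt_end;
        return w;
}

int main(void)
{
        /* 16-page window around a fault at 0x7f0000005000 in a small VMA */
        struct fa_window w = fault_around_window(0x7f0000005000UL,
                                                 0x7f0000003000UL,
                                                 0x7f0000009000UL, 16);

        printf("map_pages() window: [%#lx, %#lx)\n", w.start, w.end);
        return 0;
}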
5276                                                  3648 
5277 static vm_fault_t do_read_fault(struct vm_fau !! 3649 static int do_read_fault(struct vm_fault *vmf)
5278 {                                                3650 {
5279         vm_fault_t ret = 0;                   !! 3651         struct vm_area_struct *vma = vmf->vma;
5280         struct folio *folio;                  !! 3652         int ret = 0;
5281                                                  3653 
5282         /*                                       3654         /*
5283          * Let's call ->map_pages() first and    3655          * Let's call ->map_pages() first and use ->fault() as fallback
5284          * if page by the offset is not ready    3656          * if page by the offset is not ready to be mapped (cold cache or
5285          * something).                           3657          * something).
5286          */                                      3658          */
5287         if (should_fault_around(vmf)) {       !! 3659         if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
5288                 ret = do_fault_around(vmf);      3660                 ret = do_fault_around(vmf);
5289                 if (ret)                         3661                 if (ret)
5290                         return ret;              3662                         return ret;
5291         }                                        3663         }
5292                                                  3664 
5293         ret = vmf_can_call_fault(vmf);        << 
5294         if (ret)                              << 
5295                 return ret;                   << 
5296                                               << 
5297         ret = __do_fault(vmf);                   3665         ret = __do_fault(vmf);
5298         if (unlikely(ret & (VM_FAULT_ERROR |     3666         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5299                 return ret;                      3667                 return ret;
5300                                                  3668 
5301         ret |= finish_fault(vmf);                3669         ret |= finish_fault(vmf);
5302         folio = page_folio(vmf->page);        !! 3670         unlock_page(vmf->page);
5303         folio_unlock(folio);                  << 
5304         if (unlikely(ret & (VM_FAULT_ERROR |     3671         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5305                 folio_put(folio);             !! 3672                 put_page(vmf->page);
5306         return ret;                              3673         return ret;
5307 }                                                3674 }
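The effect of do_read_fault() going through do_fault_around() first is observable from userspace: once the file data is in the page cache, touching one page of a read-only mapping usually populates its neighbours' PTEs as well, so the per-process minor fault count rises by far less than the number of pages touched. The program below is a rough illustration only; the exact count depends on fault_around_bytes, the filesystem and the page cache state, and the temp-file path is arbitrary.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/resource.h>

static long minflt(void)
{
        struct rusage ru;

        getrusage(RUSAGE_SELF, &ru);
        return ru.ru_minflt;
}

int main(void)
{
        char tmpl[] = "/tmp/fa-demo-XXXXXX";
        char buf[4096];
        size_t len = 16 * sizeof(buf);
        int fd = mkstemp(tmpl);
        volatile char sink;

        if (fd < 0)
                return 1;
        unlink(tmpl);
        memset(buf, 'x', sizeof(buf));
        for (size_t i = 0; i < len; i += sizeof(buf))
                if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
                        return 1;       /* file data is now in the page cache */

        char *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        long before = minflt();
        for (size_t i = 0; i < len; i += sizeof(buf))
                sink = p[i];            /* read faults; neighbours often premapped */
        (void)sink;
        printf("minor faults for 16 resident pages: %ld\n", minflt() - before);

        munmap(p, len);
        close(fd);
        return 0;
}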
5308                                                  3675 
5309 static vm_fault_t do_cow_fault(struct vm_faul !! 3676 static int do_cow_fault(struct vm_fault *vmf)
5310 {                                                3677 {
5311         struct vm_area_struct *vma = vmf->vma    3678         struct vm_area_struct *vma = vmf->vma;
5312         struct folio *folio;                  !! 3679         int ret;
5313         vm_fault_t ret;                       << 
5314                                                  3680 
5315         ret = vmf_can_call_fault(vmf);        !! 3681         if (unlikely(anon_vma_prepare(vma)))
5316         if (!ret)                             !! 3682                 return VM_FAULT_OOM;
5317                 ret = vmf_anon_prepare(vmf);  << 
5318         if (ret)                              << 
5319                 return ret;                   << 
5320                                                  3683 
5321         folio = folio_prealloc(vma->vm_mm, vm !! 3684         vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
5322         if (!folio)                           !! 3685         if (!vmf->cow_page)
5323                 return VM_FAULT_OOM;             3686                 return VM_FAULT_OOM;
5324                                                  3687 
5325         vmf->cow_page = &folio->page;         !! 3688         if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
                                                   >> 3689                                 &vmf->memcg, false)) {
                                                   >> 3690                 put_page(vmf->cow_page);
                                                   >> 3691                 return VM_FAULT_OOM;
                                                   >> 3692         }
5326                                                  3693 
5327         ret = __do_fault(vmf);                   3694         ret = __do_fault(vmf);
5328         if (unlikely(ret & (VM_FAULT_ERROR |     3695         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5329                 goto uncharge_out;               3696                 goto uncharge_out;
5330         if (ret & VM_FAULT_DONE_COW)             3697         if (ret & VM_FAULT_DONE_COW)
5331                 return ret;                      3698                 return ret;
5332                                                  3699 
5333         if (copy_mc_user_highpage(vmf->cow_pa !! 3700         copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
5334                 ret = VM_FAULT_HWPOISON;      !! 3701         __SetPageUptodate(vmf->cow_page);
5335                 goto unlock;                  << 
5336         }                                     << 
5337         __folio_mark_uptodate(folio);         << 
5338                                                  3702 
5339         ret |= finish_fault(vmf);                3703         ret |= finish_fault(vmf);
5340 unlock:                                       << 
5341         unlock_page(vmf->page);                  3704         unlock_page(vmf->page);
5342         put_page(vmf->page);                     3705         put_page(vmf->page);
5343         if (unlikely(ret & (VM_FAULT_ERROR |     3706         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5344                 goto uncharge_out;               3707                 goto uncharge_out;
5345         return ret;                              3708         return ret;
5346 uncharge_out:                                    3709 uncharge_out:
5347         folio_put(folio);                     !! 3710         mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false);
                                                   >> 3711         put_page(vmf->cow_page);
5348         return ret;                              3712         return ret;
5349 }                                                3713 }
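Seen from userspace, the COW path above is what makes a store through a MAP_PRIVATE file mapping visible in the mapping but never in the file: the first write fault copies the page-cache page into the freshly allocated anonymous page before mapping it writable. A minimal demonstration (the temp-file path is arbitrary):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        char tmpl[] = "/tmp/cow-demo-XXXXXX";
        int fd = mkstemp(tmpl);
        char filebyte;

        if (fd < 0)
                return 1;
        unlink(tmpl);
        if (write(fd, "A", 1) != 1 || ftruncate(fd, 4096) != 0)
                return 1;

        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        p[0] = 'B';                     /* first write fault: the COW path */
        if (pread(fd, &filebyte, 1, 0) != 1)
                return 1;

        printf("mapping sees '%c', file still holds '%c'\n", p[0], filebyte);
        munmap(p, 4096);
        close(fd);
        return 0;
}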
5350                                                  3714 
5351 static vm_fault_t do_shared_fault(struct vm_f !! 3715 static int do_shared_fault(struct vm_fault *vmf)
5352 {                                                3716 {
5353         struct vm_area_struct *vma = vmf->vma    3717         struct vm_area_struct *vma = vmf->vma;
5354         vm_fault_t ret, tmp;                  !! 3718         int ret, tmp;
5355         struct folio *folio;                  << 
5356                                               << 
5357         ret = vmf_can_call_fault(vmf);        << 
5358         if (ret)                              << 
5359                 return ret;                   << 
5360                                                  3719 
5361         ret = __do_fault(vmf);                   3720         ret = __do_fault(vmf);
5362         if (unlikely(ret & (VM_FAULT_ERROR |     3721         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5363                 return ret;                      3722                 return ret;
5364                                                  3723 
5365         folio = page_folio(vmf->page);        << 
5366                                               << 
5367         /*                                       3724         /*
5368          * Check if the backing address space    3725          * Check if the backing address space wants to know that the page is
5369          * about to become writable              3726          * about to become writable
5370          */                                      3727          */
5371         if (vma->vm_ops->page_mkwrite) {         3728         if (vma->vm_ops->page_mkwrite) {
5372                 folio_unlock(folio);          !! 3729                 unlock_page(vmf->page);
5373                 tmp = do_page_mkwrite(vmf, fo !! 3730                 tmp = do_page_mkwrite(vmf);
5374                 if (unlikely(!tmp ||             3731                 if (unlikely(!tmp ||
5375                                 (tmp & (VM_FA    3732                                 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
5376                         folio_put(folio);     !! 3733                         put_page(vmf->page);
5377                         return tmp;              3734                         return tmp;
5378                 }                                3735                 }
5379         }                                        3736         }
5380                                                  3737 
5381         ret |= finish_fault(vmf);                3738         ret |= finish_fault(vmf);
5382         if (unlikely(ret & (VM_FAULT_ERROR |     3739         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
5383                                         VM_FA    3740                                         VM_FAULT_RETRY))) {
5384                 folio_unlock(folio);          !! 3741                 unlock_page(vmf->page);
5385                 folio_put(folio);             !! 3742                 put_page(vmf->page);
5386                 return ret;                      3743                 return ret;
5387         }                                        3744         }
5388                                                  3745 
5389         ret |= fault_dirty_shared_page(vmf);  !! 3746         fault_dirty_shared_page(vma, vmf->page);
5390         return ret;                              3747         return ret;
5391 }                                                3748 }
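The shared counterpart behaves differently: the first store through a MAP_SHARED mapping goes through page_mkwrite() and dirties the page-cache page itself, so the new data lands in the file. A minimal demonstration, mirroring the private-mapping example above:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        char tmpl[] = "/tmp/shared-demo-XXXXXX";
        int fd = mkstemp(tmpl);
        char filebyte;

        if (fd < 0)
                return 1;
        unlink(tmpl);
        if (write(fd, "A", 1) != 1 || ftruncate(fd, 4096) != 0)
                return 1;

        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        p[0] = 'B';                     /* first write fault: the shared path */
        msync(p, 4096, MS_SYNC);        /* write the now-dirty page back */
        if (pread(fd, &filebyte, 1, 0) != 1)
                return 1;

        printf("file now holds '%c' after the store through the mapping\n",
               filebyte);
        munmap(p, 4096);
        close(fd);
        return 0;
}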
5392                                                  3749 
5393 /*                                               3750 /*
5394  * We enter with non-exclusive mmap_lock (to  !! 3751  * We enter with non-exclusive mmap_sem (to exclude vma changes,
5395  * but allow concurrent faults).                 3752  * but allow concurrent faults).
5396  * The mmap_lock may have been released depen !! 3753  * The mmap_sem may have been released depending on flags and our
5397  * return value.  See filemap_fault() and __f !! 3754  * return value.  See filemap_fault() and __lock_page_or_retry().
5398  * If mmap_lock is released, vma may become i << 
5399  * by other thread calling munmap()).         << 
5400  */                                              3755  */
5401 static vm_fault_t do_fault(struct vm_fault *v !! 3756 static int do_fault(struct vm_fault *vmf)
5402 {                                                3757 {
5403         struct vm_area_struct *vma = vmf->vma    3758         struct vm_area_struct *vma = vmf->vma;
5404         struct mm_struct *vm_mm = vma->vm_mm; !! 3759         int ret;
5405         vm_fault_t ret;                       << 
5406                                               << 
5407         /*                                    << 
5408          * The VMA was not fully populated on << 
5409          */                                   << 
5410         if (!vma->vm_ops->fault) {            << 
5411                 vmf->pte = pte_offset_map_loc << 
5412                                               << 
5413                 if (unlikely(!vmf->pte))      << 
5414                         ret = VM_FAULT_SIGBUS << 
5415                 else {                        << 
5416                         /*                    << 
5417                          * Make sure this is  << 
5418                          * by holding ptl and << 
5419                          * of pte involves: t << 
5420                          * we don't have conc << 
5421                          * followed by an upd << 
5422                          */                   << 
5423                         if (unlikely(pte_none << 
5424                                 ret = VM_FAUL << 
5425                         else                  << 
5426                                 ret = VM_FAUL << 
5427                                                  3760 
5428                         pte_unmap_unlock(vmf- !! 3761         /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
5429                 }                             !! 3762         if (!vma->vm_ops->fault)
5430         } else if (!(vmf->flags & FAULT_FLAG_ !! 3763                 ret = VM_FAULT_SIGBUS;
                                                   >> 3764         else if (!(vmf->flags & FAULT_FLAG_WRITE))
5431                 ret = do_read_fault(vmf);        3765                 ret = do_read_fault(vmf);
5432         else if (!(vma->vm_flags & VM_SHARED)    3766         else if (!(vma->vm_flags & VM_SHARED))
5433                 ret = do_cow_fault(vmf);         3767                 ret = do_cow_fault(vmf);
5434         else                                     3768         else
5435                 ret = do_shared_fault(vmf);      3769                 ret = do_shared_fault(vmf);
5436                                                  3770 
5437         /* preallocated pagetable is unused:     3771         /* preallocated pagetable is unused: free it */
5438         if (vmf->prealloc_pte) {                 3772         if (vmf->prealloc_pte) {
5439                 pte_free(vm_mm, vmf->prealloc !! 3773                 pte_free(vma->vm_mm, vmf->prealloc_pte);
5440                 vmf->prealloc_pte = NULL;        3774                 vmf->prealloc_pte = NULL;
5441         }                                        3775         }
5442         return ret;                              3776         return ret;
5443 }                                                3777 }
5444                                                  3778 
5445 int numa_migrate_check(struct folio *folio, s !! 3779 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
5446                       unsigned long addr, int !! 3780                                 unsigned long addr, int page_nid,
5447                       bool writable, int *las !! 3781                                 int *flags)
5448 {                                                3782 {
5449         struct vm_area_struct *vma = vmf->vma !! 3783         get_page(page);
5450                                               << 
5451         /*                                    << 
5452          * Avoid grouping on RO pages in gene << 
5453          * much anyway since they can be in s << 
5454          * the case where a mapping is writab << 
5455          * to it but pte_write gets cleared d << 
5456          * pte_dirty has unpredictable behavi << 
5457          * background writeback, dirty balanc << 
5458          */                                   << 
5459         if (!writable)                        << 
5460                 *flags |= TNF_NO_GROUP;       << 
5461                                               << 
5462         /*                                    << 
5463          * Flag if the folio is shared betwee << 
5464          * is later used when determining whe << 
5465          */                                   << 
5466         if (folio_likely_mapped_shared(folio) << 
5467                 *flags |= TNF_SHARED;         << 
5468         /*                                    << 
5469          * For memory tiering mode, cpupid of << 
5470          * to record page access time.  So us << 
5471          */                                   << 
5472         if (folio_use_access_time(folio))     << 
5473                 *last_cpupid = (-1 & LAST_CPU << 
5474         else                                  << 
5475                 *last_cpupid = folio_last_cpu << 
5476                                               << 
5477         /* Record the current PID accessing V << 
5478         vma_set_access_pid_bit(vma);          << 
5479                                                  3784 
5480         count_vm_numa_event(NUMA_HINT_FAULTS)    3785         count_vm_numa_event(NUMA_HINT_FAULTS);
5481 #ifdef CONFIG_NUMA_BALANCING                  !! 3786         if (page_nid == numa_node_id()) {
5482         count_memcg_folio_events(folio, NUMA_ << 
5483 #endif                                        << 
5484         if (folio_nid(folio) == numa_node_id( << 
5485                 count_vm_numa_event(NUMA_HINT    3787                 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
5486                 *flags |= TNF_FAULT_LOCAL;       3788                 *flags |= TNF_FAULT_LOCAL;
5487         }                                        3789         }
5488                                                  3790 
5489         return mpol_misplaced(folio, vmf, add !! 3791         return mpol_misplaced(page, vma, addr);
5490 }                                             << 
5491                                               << 
5492 static void numa_rebuild_single_mapping(struc << 
5493                                         unsig << 
5494                                         bool  << 
5495 {                                             << 
5496         pte_t pte, old_pte;                   << 
5497                                               << 
5498         old_pte = ptep_modify_prot_start(vma, << 
5499         pte = pte_modify(old_pte, vma->vm_pag << 
5500         pte = pte_mkyoung(pte);               << 
5501         if (writable)                         << 
5502                 pte = pte_mkwrite(pte, vma);  << 
5503         ptep_modify_prot_commit(vma, fault_ad << 
5504         update_mmu_cache_range(vmf, vma, faul << 
5505 }                                             << 
5506                                               << 
5507 static void numa_rebuild_large_mapping(struct << 
5508                                        struct << 
5509                                        bool i << 
5510 {                                             << 
5511         int nr = pte_pfn(fault_pte) - folio_p << 
5512         unsigned long start, end, addr = vmf- << 
5513         unsigned long addr_start = addr - (nr << 
5514         unsigned long pt_start = ALIGN_DOWN(a << 
5515         pte_t *start_ptep;                    << 
5516                                               << 
5517         /* Stay within the VMA and within the << 
5518         start = max3(addr_start, pt_start, vm << 
5519         end = min3(addr_start + folio_size(fo << 
5520                    vma->vm_end);              << 
5521         start_ptep = vmf->pte - ((addr - star << 
5522                                               << 
5523         /* Restore all PTEs' mapping of the l << 
5524         for (addr = start; addr != end; start << 
5525                 pte_t ptent = ptep_get(start_ << 
5526                 bool writable = false;        << 
5527                                               << 
5528                 if (!pte_present(ptent) || !p << 
5529                         continue;             << 
5530                                               << 
5531                 if (pfn_folio(pte_pfn(ptent)) << 
5532                         continue;             << 
5533                                               << 
5534                 if (!ignore_writable) {       << 
5535                         ptent = pte_modify(pt << 
5536                         writable = pte_write( << 
5537                         if (!writable && pte_ << 
5538                             can_change_pte_wr << 
5539                                 writable = tr << 
5540                 }                             << 
5541                                               << 
5542                 numa_rebuild_single_mapping(v << 
5543         }                                     << 
5544 }                                                3792 }
5545                                                  3793 
5546 static vm_fault_t do_numa_page(struct vm_faul !! 3794 static int do_numa_page(struct vm_fault *vmf)
5547 {                                                3795 {
5548         struct vm_area_struct *vma = vmf->vma    3796         struct vm_area_struct *vma = vmf->vma;
5549         struct folio *folio = NULL;           !! 3797         struct page *page = NULL;
5550         int nid = NUMA_NO_NODE;               !! 3798         int page_nid = -1;
5551         bool writable = false, ignore_writabl << 
5552         bool pte_write_upgrade = vma_wants_ma << 
5553         int last_cpupid;                         3799         int last_cpupid;
5554         int target_nid;                          3800         int target_nid;
5555         pte_t pte, old_pte;                   !! 3801         bool migrated = false;
5556         int flags = 0, nr_pages;              !! 3802         pte_t pte;
                                                   >> 3803         bool was_writable = pte_savedwrite(vmf->orig_pte);
                                                   >> 3804         int flags = 0;
5557                                                  3805 
5558         /*                                       3806         /*
5559          * The pte cannot be used safely unti !! 3807          * The "pte" at this point cannot be used safely without
5560          * table lock, that its contents have !! 3808          * validation through pte_unmap_same(). It's of NUMA type but
                                                   >> 3809          * the pfn may be screwed if the read is non atomic.
5561          */                                      3810          */
                                                   >> 3811         vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
5562         spin_lock(vmf->ptl);                     3812         spin_lock(vmf->ptl);
5563         /* Read the live PTE from the page ta !! 3813         if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
5564         old_pte = ptep_get(vmf->pte);         << 
5565                                               << 
5566         if (unlikely(!pte_same(old_pte, vmf-> << 
5567                 pte_unmap_unlock(vmf->pte, vm    3814                 pte_unmap_unlock(vmf->pte, vmf->ptl);
5568                 return 0;                     !! 3815                 goto out;
5569         }                                        3816         }
5570                                                  3817 
5571         pte = pte_modify(old_pte, vma->vm_pag << 
5572                                               << 
5573         /*                                       3818         /*
5574          * Detect now whether the PTE could b !! 3819          * Make it present again, depending on how arch implements non
5575          * is only valid while holding the PT !! 3820          * accessible ptes, some can allow access by kernel mode.
5576          */                                      3821          */
5577         writable = pte_write(pte);            !! 3822         pte = ptep_modify_prot_start(vma->vm_mm, vmf->address, vmf->pte);
5578         if (!writable && pte_write_upgrade && !! 3823         pte = pte_modify(pte, vma->vm_page_prot);
5579             can_change_pte_writable(vma, vmf- !! 3824         pte = pte_mkyoung(pte);
5580                 writable = true;              !! 3825         if (was_writable)
5581                                               !! 3826                 pte = pte_mkwrite(pte);
5582         folio = vm_normal_folio(vma, vmf->add !! 3827         ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte);
5583         if (!folio || folio_is_zone_device(fo !! 3828         update_mmu_cache(vma, vmf->address, vmf->pte);
5584                 goto out_map;                 << 
5585                                               << 
5586         nid = folio_nid(folio);               << 
5587         nr_pages = folio_nr_pages(folio);     << 
5588                                               << 
5589         target_nid = numa_migrate_check(folio << 
5590                                         writa << 
5591         if (target_nid == NUMA_NO_NODE)       << 
5592                 goto out_map;                 << 
5593         if (migrate_misplaced_folio_prepare(f << 
5594                 flags |= TNF_MIGRATE_FAIL;    << 
5595                 goto out_map;                 << 
5596         }                                     << 
5597         /* The folio is isolated and isolatio << 
5598         pte_unmap_unlock(vmf->pte, vmf->ptl); << 
5599         writable = false;                     << 
5600         ignore_writable = true;               << 
5601                                                  3829 
5602         /* Migrate to the requested node */   !! 3830         page = vm_normal_page(vma, vmf->address, pte);
5603         if (!migrate_misplaced_folio(folio, v !! 3831         if (!page) {
5604                 nid = target_nid;             !! 3832                 pte_unmap_unlock(vmf->pte, vmf->ptl);
5605                 flags |= TNF_MIGRATED;        << 
5606                 task_numa_fault(last_cpupid,  << 
5607                 return 0;                        3833                 return 0;
5608         }                                        3834         }
5609                                                  3835 
5610         flags |= TNF_MIGRATE_FAIL;            !! 3836         /* TODO: handle PTE-mapped THP */
5611         vmf->pte = pte_offset_map_lock(vma->v !! 3837         if (PageCompound(page)) {
5612                                        vmf->a << 
5613         if (unlikely(!vmf->pte))              << 
5614                 return 0;                     << 
5615         if (unlikely(!pte_same(ptep_get(vmf-> << 
5616                 pte_unmap_unlock(vmf->pte, vm    3838                 pte_unmap_unlock(vmf->pte, vmf->ptl);
5617                 return 0;                        3839                 return 0;
5618         }                                        3840         }
5619 out_map:                                      !! 3841 
5620         /*                                       3842         /*
5621          * Make it present again, depending o !! 3843          * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
5622          * non-accessible ptes, some can allo !! 3844          * much anyway since they can be in shared cache state. This misses
                                                   >> 3845          * the case where a mapping is writable but the process never writes
                                                   >> 3846          * to it but pte_write gets cleared during protection updates and
                                                   >> 3847          * pte_dirty has unpredictable behaviour between PTE scan updates,
                                                   >> 3848          * background writeback, dirty balancing and application behaviour.
5623          */                                      3849          */
5624         if (folio && folio_test_large(folio)) !! 3850         if (!pte_write(pte))
5625                 numa_rebuild_large_mapping(vm !! 3851                 flags |= TNF_NO_GROUP;
5626                                            pt !! 3852 
5627         else                                  !! 3853         /*
5628                 numa_rebuild_single_mapping(v !! 3854          * Flag if the page is shared between multiple address spaces. This
5629                                             w !! 3855          * is later used when determining whether to group tasks together
                                                   >> 3856          */
                                                   >> 3857         if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
                                                   >> 3858                 flags |= TNF_SHARED;
                                                   >> 3859 
                                                   >> 3860         last_cpupid = page_cpupid_last(page);
                                                   >> 3861         page_nid = page_to_nid(page);
                                                   >> 3862         target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
                                                   >> 3863                         &flags);
5630         pte_unmap_unlock(vmf->pte, vmf->ptl);    3864         pte_unmap_unlock(vmf->pte, vmf->ptl);
                                                   >> 3865         if (target_nid == -1) {
                                                   >> 3866                 put_page(page);
                                                   >> 3867                 goto out;
                                                   >> 3868         }
5631                                                  3869 
5632         if (nid != NUMA_NO_NODE)              !! 3870         /* Migrate to the requested node */
5633                 task_numa_fault(last_cpupid,  !! 3871         migrated = migrate_misplaced_page(page, vma, target_nid);
                                                   >> 3872         if (migrated) {
                                                   >> 3873                 page_nid = target_nid;
                                                   >> 3874                 flags |= TNF_MIGRATED;
                                                   >> 3875         } else
                                                   >> 3876                 flags |= TNF_MIGRATE_FAIL;
                                                   >> 3877 
                                                   >> 3878 out:
                                                   >> 3879         if (page_nid != -1)
                                                   >> 3880                 task_numa_fault(last_cpupid, page_nid, 1, flags);
5634         return 0;                                3881         return 0;
5635 }                                                3882 }
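The placement that NUMA hinting faults ultimately produce can be inspected from userspace with move_pages(2) in its query form (nodes == NULL), which fills status[] with the node each page currently resides on. The sketch below issues the raw syscall so no libnuma headers are needed; on a single-node machine every page simply reports node 0.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4 * 4096;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        void *pages[4];
        int status[4];

        if (p == MAP_FAILED)
                return 1;
        memset(p, 1, len);              /* fault the pages in */

        for (int i = 0; i < 4; i++)
                pages[i] = p + i * 4096;

        /* pid 0 == self; nodes == NULL means "just report current nodes" */
        if (syscall(SYS_move_pages, 0, 4UL, pages, NULL, status, 0) != 0) {
                perror("move_pages");
                return 1;
        }
        for (int i = 0; i < 4; i++)
                printf("page %d is on node %d\n", i, status[i]);

        munmap(p, len);
        return 0;
}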
5636                                                  3883 
5637 static inline vm_fault_t create_huge_pmd(stru !! 3884 static inline int create_huge_pmd(struct vm_fault *vmf)
5638 {                                                3885 {
5639         struct vm_area_struct *vma = vmf->vma !! 3886         if (vma_is_anonymous(vmf->vma))
5640         if (vma_is_anonymous(vma))            << 
5641                 return do_huge_pmd_anonymous_    3887                 return do_huge_pmd_anonymous_page(vmf);
5642         if (vma->vm_ops->huge_fault)          !! 3888         if (vmf->vma->vm_ops->huge_fault)
5643                 return vma->vm_ops->huge_faul !! 3889                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
5644         return VM_FAULT_FALLBACK;                3890         return VM_FAULT_FALLBACK;
5645 }                                                3891 }
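create_huge_pmd() is the entry point for the anonymous THP faults that userspace provokes with a PMD-aligned, MADV_HUGEPAGE-advised mapping. The sketch below triggers such faults and reports AnonHugePages from /proc/self/smaps_rollup (present on reasonably recent kernels); the 2 MiB PMD size is an assumption, and the figure can legitimately be zero depending on THP policy and memory fragmentation.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <sys/mman.h>

#define HPAGE (2UL * 1024 * 1024)       /* assumed PMD size (x86-64/arm64 4K) */

int main(void)
{
        size_t len = 4 * HPAGE;
        char *raw = mmap(NULL, len + HPAGE, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (raw == MAP_FAILED)
                return 1;

        /* Align to a PMD boundary so a huge page can be mapped at the fault. */
        char *p = (char *)(((uintptr_t)raw + HPAGE - 1) & ~(HPAGE - 1));
        madvise(p, len, MADV_HUGEPAGE);
        memset(p, 1, len);              /* first touch per PMD: anonymous THP fault */

        FILE *f = fopen("/proc/self/smaps_rollup", "r");
        char line[256];

        while (f && fgets(line, sizeof(line), f))
                if (strncmp(line, "AnonHugePages:", 14) == 0)
                        fputs(line, stdout);
        if (f)
                fclose(f);
        munmap(raw, len + HPAGE);
        return 0;
}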
5646                                                  3892 
5647 /* `inline' is required to avoid gcc 4.1.2 bu    3893 /* `inline' is required to avoid gcc 4.1.2 build error */
5648 static inline vm_fault_t wp_huge_pmd(struct v !! 3894 static inline int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
5649 {                                                3895 {
5650         struct vm_area_struct *vma = vmf->vma !! 3896         if (vma_is_anonymous(vmf->vma))
5651         const bool unshare = vmf->flags & FAU !! 3897                 return do_huge_pmd_wp_page(vmf, orig_pmd);
5652         vm_fault_t ret;                       !! 3898         if (vmf->vma->vm_ops->huge_fault)
5653                                               !! 3899                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
5654         if (vma_is_anonymous(vma)) {          !! 3900 
5655                 if (likely(!unshare) &&       !! 3901         /* COW handled on pte level: split pmd */
5656                     userfaultfd_huge_pmd_wp(v !! 3902         VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);
5657                         if (userfaultfd_wp_as !! 3903         __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
5658                                 goto split;   << 
5659                         return handle_userfau << 
5660                 }                             << 
5661                 return do_huge_pmd_wp_page(vm << 
5662         }                                     << 
5663                                               << 
5664         if (vma->vm_flags & (VM_SHARED | VM_M << 
5665                 if (vma->vm_ops->huge_fault)  << 
5666                         ret = vma->vm_ops->hu << 
5667                         if (!(ret & VM_FAULT_ << 
5668                                 return ret;   << 
5669                 }                             << 
5670         }                                     << 
5671                                               << 
5672 split:                                        << 
5673         /* COW or write-notify handled on pte << 
5674         __split_huge_pmd(vma, vmf->pmd, vmf-> << 
5675                                                  3904 
5676         return VM_FAULT_FALLBACK;                3905         return VM_FAULT_FALLBACK;
5677 }                                                3906 }
5678                                                  3907 
5679 static vm_fault_t create_huge_pud(struct vm_f !! 3908 static inline bool vma_is_accessible(struct vm_area_struct *vma)
5680 {                                                3909 {
5681 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&   !! 3910         return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
5682         defined(CONFIG_HAVE_ARCH_TRANSPARENT_ !! 3911 }
5683         struct vm_area_struct *vma = vmf->vma !! 3912 
                                                   >> 3913 static int create_huge_pud(struct vm_fault *vmf)
                                                   >> 3914 {
                                                   >> 3915 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5684         /* No support for anonymous transpare    3916         /* No support for anonymous transparent PUD pages yet */
5685         if (vma_is_anonymous(vma))            !! 3917         if (vma_is_anonymous(vmf->vma))
5686                 return VM_FAULT_FALLBACK;        3918                 return VM_FAULT_FALLBACK;
5687         if (vma->vm_ops->huge_fault)          !! 3919         if (vmf->vma->vm_ops->huge_fault)
5688                 return vma->vm_ops->huge_faul !! 3920                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
5689 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */         3921 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
5690         return VM_FAULT_FALLBACK;                3922         return VM_FAULT_FALLBACK;
5691 }                                                3923 }
5692                                                  3924 
5693 static vm_fault_t wp_huge_pud(struct vm_fault !! 3925 static int wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
5694 {                                                3926 {
5695 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&   !! 3927 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5696         defined(CONFIG_HAVE_ARCH_TRANSPARENT_ << 
5697         struct vm_area_struct *vma = vmf->vma << 
5698         vm_fault_t ret;                       << 
5699                                               << 
5700         /* No support for anonymous transpare    3928         /* No support for anonymous transparent PUD pages yet */
5701         if (vma_is_anonymous(vma))            !! 3929         if (vma_is_anonymous(vmf->vma))
5702                 goto split;                   !! 3930                 return VM_FAULT_FALLBACK;
5703         if (vma->vm_flags & (VM_SHARED | VM_M !! 3931         if (vmf->vma->vm_ops->huge_fault)
5704                 if (vma->vm_ops->huge_fault)  !! 3932                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
5705                         ret = vma->vm_ops->hu !! 3933 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
5706                         if (!(ret & VM_FAULT_ << 
5707                                 return ret;   << 
5708                 }                             << 
5709         }                                     << 
5710 split:                                        << 
5711         /* COW or write-notify not handled on << 
5712         __split_huge_pud(vma, vmf->pud, vmf-> << 
5713 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONF << 
5714         return VM_FAULT_FALLBACK;                3934         return VM_FAULT_FALLBACK;
5715 }                                                3935 }
5716                                                  3936 
5717 /*                                               3937 /*
5718  * These routines also need to handle stuff l    3938  * These routines also need to handle stuff like marking pages dirty
5719  * and/or accessed for architectures that don    3939  * and/or accessed for architectures that don't do it in hardware (most
5720  * RISC architectures).  The early dirtying i    3940  * RISC architectures).  The early dirtying is also good on the i386.
5721  *                                               3941  *
5722  * There is also a hook called "update_mmu_ca    3942  * There is also a hook called "update_mmu_cache()" that architectures
5723  * with external mmu caches can use to update    3943  * with external mmu caches can use to update those (ie the Sparc or
5724  * PowerPC hashed page tables that act as ext    3944  * PowerPC hashed page tables that act as extended TLBs).
5725  *                                               3945  *
5726  * We enter with non-exclusive mmap_lock (to  !! 3946  * We enter with non-exclusive mmap_sem (to exclude vma changes, but allow
5727  * concurrent faults).                           3947  * concurrent faults).
5728  *                                               3948  *
5729  * The mmap_lock may have been released depen !! 3949  * The mmap_sem may have been released depending on flags and our return value.
5730  * See filemap_fault() and __folio_lock_or_re !! 3950  * See filemap_fault() and __lock_page_or_retry().
5731  */                                              3951  */
5732 static vm_fault_t handle_pte_fault(struct vm_ !! 3952 static int handle_pte_fault(struct vm_fault *vmf)
5733 {                                                3953 {
5734         pte_t entry;                             3954         pte_t entry;
5735                                                  3955 
5736         if (unlikely(pmd_none(*vmf->pmd))) {     3956         if (unlikely(pmd_none(*vmf->pmd))) {
5737                 /*                               3957                 /*
5738                  * Leave __pte_alloc() until     3958                  * Leave __pte_alloc() until later: because vm_ops->fault may
5739                  * want to allocate huge page    3959                  * want to allocate huge page, and if we expose page table
5740                  * for an instant, it will be    3960                  * for an instant, it will be difficult to retract from
5741                  * concurrent faults and from    3961                  * concurrent faults and from rmap lookups.
5742                  */                              3962                  */
5743                 vmf->pte = NULL;                 3963                 vmf->pte = NULL;
5744                 vmf->flags &= ~FAULT_FLAG_ORI << 
5745         } else {                                 3964         } else {
                                                   >> 3965                 /* See comment in pte_alloc_one_map() */
                                                   >> 3966                 if (pmd_devmap_trans_unstable(vmf->pmd))
                                                   >> 3967                         return 0;
5746                 /*                               3968                 /*
5747                  * A regular pmd is establish    3969                  * A regular pmd is established and it can't morph into a huge
5748                  * pmd by anon khugepaged, si !! 3970                  * pmd from under us anymore at this point because we hold the
5749                  * mode; but shmem or file co !! 3971                  * mmap_sem read mode and khugepaged takes it in write mode.
5750                  * it into a huge pmd: just r !! 3972                  * So now it's safe to run pte_offset_map().
5751                  */                              3973                  */
5752                 vmf->pte = pte_offset_map_nol !! 3974                 vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
5753                                               !! 3975                 vmf->orig_pte = *vmf->pte;
5754                 if (unlikely(!vmf->pte))      << 
5755                         return 0;             << 
5756                 vmf->orig_pte = ptep_get_lock << 
5757                 vmf->flags |= FAULT_FLAG_ORIG << 
5758                                                  3976 
                                                   >> 3977                 /*
                                                   >> 3978                  * some architectures can have larger ptes than wordsize,
                                                   >> 3979                  * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and
                                                   >> 3980                  * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
                                                   >> 3981                  * accesses.  The code below just needs a consistent view
                                                   >> 3982                  * for the ifs and we later double check anyway with the
                                                   >> 3983                  * ptl lock held. So here a barrier will do.
                                                   >> 3984                  */
                                                   >> 3985                 barrier();
5759                 if (pte_none(vmf->orig_pte))     3986                 if (pte_none(vmf->orig_pte)) {
5760                         pte_unmap(vmf->pte);     3987                         pte_unmap(vmf->pte);
5761                         vmf->pte = NULL;         3988                         vmf->pte = NULL;
5762                 }                                3989                 }
5763         }                                        3990         }
5764                                                  3991 
5765         if (!vmf->pte)                        !! 3992         if (!vmf->pte) {
5766                 return do_pte_missing(vmf);   !! 3993                 if (vma_is_anonymous(vmf->vma))
                                                   >> 3994                         return do_anonymous_page(vmf);
                                                   >> 3995                 else
                                                   >> 3996                         return do_fault(vmf);
                                                   >> 3997         }
5767                                                  3998 
5768         if (!pte_present(vmf->orig_pte))         3999         if (!pte_present(vmf->orig_pte))
5769                 return do_swap_page(vmf);        4000                 return do_swap_page(vmf);
5770                                                  4001 
5771         if (pte_protnone(vmf->orig_pte) && vm    4002         if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
5772                 return do_numa_page(vmf);        4003                 return do_numa_page(vmf);
5773                                                  4004 
                                                   >> 4005         vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
5774         spin_lock(vmf->ptl);                     4006         spin_lock(vmf->ptl);
5775         entry = vmf->orig_pte;                   4007         entry = vmf->orig_pte;
5776         if (unlikely(!pte_same(ptep_get(vmf-> !! 4008         if (unlikely(!pte_same(*vmf->pte, entry)))
5777                 update_mmu_tlb(vmf->vma, vmf- << 
5778                 goto unlock;                     4009                 goto unlock;
5779         }                                     !! 4010         if (vmf->flags & FAULT_FLAG_WRITE) {
5780         if (vmf->flags & (FAULT_FLAG_WRITE|FA << 
5781                 if (!pte_write(entry))           4011                 if (!pte_write(entry))
5782                         return do_wp_page(vmf    4012                         return do_wp_page(vmf);
5783                 else if (likely(vmf->flags &  !! 4013                 entry = pte_mkdirty(entry);
5784                         entry = pte_mkdirty(e << 
5785         }                                        4014         }
5786         entry = pte_mkyoung(entry);              4015         entry = pte_mkyoung(entry);
5787         if (ptep_set_access_flags(vmf->vma, v    4016         if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
5788                                 vmf->flags &     4017                                 vmf->flags & FAULT_FLAG_WRITE)) {
5789                 update_mmu_cache_range(vmf, v !! 4018                 update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
5790                                 vmf->pte, 1); << 
5791         } else {                                 4019         } else {
5792                 /* Skip spurious TLB flush fo << 
5793                 if (vmf->flags & FAULT_FLAG_T << 
5794                         goto unlock;          << 
5795                 /*                               4020                 /*
5796                  * This is needed only for pr    4021                  * This is needed only for protection faults but the arch code
5797                  * is not yet telling us if t    4022                  * is not yet telling us if this is a protection fault or not.
5798                  * This still avoids useless     4023                  * This still avoids useless tlb flushes for .text page faults
5799                  * with threads.                 4024                  * with threads.
5800                  */                              4025                  */
5801                 if (vmf->flags & FAULT_FLAG_W    4026                 if (vmf->flags & FAULT_FLAG_WRITE)
5802                         flush_tlb_fix_spuriou !! 4027                         flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
5803                                               << 
5804         }                                        4028         }
5805 unlock:                                          4029 unlock:
5806         pte_unmap_unlock(vmf->pte, vmf->ptl);    4030         pte_unmap_unlock(vmf->pte, vmf->ptl);
5807         return 0;                                4031         return 0;
5808 }                                                4032 }
5809                                                  4033 
5810 /*                                               4034 /*
5811  * On entry, we hold either the VMA lock or t !! 4035  * By the time we get here, we already hold the mm semaphore
5812  * (FAULT_FLAG_VMA_LOCK tells you which).  If !! 4036  *
5813  * the result, the mmap_lock is not held on e !! 4037  * The mmap_sem may have been released depending on flags and our
5814  * and __folio_lock_or_retry().               !! 4038  * return value.  See filemap_fault() and __lock_page_or_retry().
5815  */                                              4039  */
5816 static vm_fault_t __handle_mm_fault(struct vm !! 4040 static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
5817                 unsigned long address, unsign !! 4041                 unsigned int flags)
5818 {                                                4042 {
5819         struct vm_fault vmf = {                  4043         struct vm_fault vmf = {
5820                 .vma = vma,                      4044                 .vma = vma,
5821                 .address = address & PAGE_MAS    4045                 .address = address & PAGE_MASK,
5822                 .real_address = address,      << 
5823                 .flags = flags,                  4046                 .flags = flags,
5824                 .pgoff = linear_page_index(vm    4047                 .pgoff = linear_page_index(vma, address),
5825                 .gfp_mask = __get_fault_gfp_m    4048                 .gfp_mask = __get_fault_gfp_mask(vma),
5826         };                                       4049         };
                                                   >> 4050         unsigned int dirty = flags & FAULT_FLAG_WRITE;
5827         struct mm_struct *mm = vma->vm_mm;       4051         struct mm_struct *mm = vma->vm_mm;
5828         unsigned long vm_flags = vma->vm_flag << 
5829         pgd_t *pgd;                              4052         pgd_t *pgd;
5830         p4d_t *p4d;                              4053         p4d_t *p4d;
5831         vm_fault_t ret;                       !! 4054         int ret;
5832                                                  4055 
5833         pgd = pgd_offset(mm, address);           4056         pgd = pgd_offset(mm, address);
5834         p4d = p4d_alloc(mm, pgd, address);       4057         p4d = p4d_alloc(mm, pgd, address);
5835         if (!p4d)                                4058         if (!p4d)
5836                 return VM_FAULT_OOM;             4059                 return VM_FAULT_OOM;
5837                                                  4060 
5838         vmf.pud = pud_alloc(mm, p4d, address)    4061         vmf.pud = pud_alloc(mm, p4d, address);
5839         if (!vmf.pud)                            4062         if (!vmf.pud)
5840                 return VM_FAULT_OOM;             4063                 return VM_FAULT_OOM;
5841 retry_pud:                                    !! 4064         if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
5842         if (pud_none(*vmf.pud) &&             << 
5843             thp_vma_allowable_order(vma, vm_f << 
5844                                 TVA_IN_PF | T << 
5845                 ret = create_huge_pud(&vmf);     4065                 ret = create_huge_pud(&vmf);
5846                 if (!(ret & VM_FAULT_FALLBACK    4066                 if (!(ret & VM_FAULT_FALLBACK))
5847                         return ret;              4067                         return ret;
5848         } else {                                 4068         } else {
5849                 pud_t orig_pud = *vmf.pud;       4069                 pud_t orig_pud = *vmf.pud;
5850                                                  4070 
5851                 barrier();                       4071                 barrier();
5852                 if (pud_trans_huge(orig_pud)     4072                 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
5853                                                  4073 
5854                         /*                    !! 4074                         /* NUMA case for anonymous PUDs would go here */
5855                          * TODO once we suppo !! 4075 
5856                          * FAULT_FLAG_UNSHARE !! 4076                         if (dirty && !pud_write(orig_pud)) {
5857                          */                   << 
5858                         if ((flags & FAULT_FL << 
5859                                 ret = wp_huge    4077                                 ret = wp_huge_pud(&vmf, orig_pud);
5860                                 if (!(ret & V    4078                                 if (!(ret & VM_FAULT_FALLBACK))
5861                                         retur    4079                                         return ret;
5862                         } else {                 4080                         } else {
5863                                 huge_pud_set_    4081                                 huge_pud_set_accessed(&vmf, orig_pud);
5864                                 return 0;        4082                                 return 0;
5865                         }                        4083                         }
5866                 }                                4084                 }
5867         }                                        4085         }
5868                                                  4086 
5869         vmf.pmd = pmd_alloc(mm, vmf.pud, addr    4087         vmf.pmd = pmd_alloc(mm, vmf.pud, address);
5870         if (!vmf.pmd)                            4088         if (!vmf.pmd)
5871                 return VM_FAULT_OOM;             4089                 return VM_FAULT_OOM;
5872                                               !! 4090         if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) {
5873         /* Huge pud page fault raced with pmd << 
5874         if (pud_trans_unstable(vmf.pud))      << 
5875                 goto retry_pud;               << 
5876                                               << 
5877         if (pmd_none(*vmf.pmd) &&             << 
5878             thp_vma_allowable_order(vma, vm_f << 
5879                                 TVA_IN_PF | T << 
5880                 ret = create_huge_pmd(&vmf);     4091                 ret = create_huge_pmd(&vmf);
5881                 if (!(ret & VM_FAULT_FALLBACK    4092                 if (!(ret & VM_FAULT_FALLBACK))
5882                         return ret;              4093                         return ret;
5883         } else {                                 4094         } else {
5884                 vmf.orig_pmd = pmdp_get_lockl !! 4095                 pmd_t orig_pmd = *vmf.pmd;
5885                                                  4096 
5886                 if (unlikely(is_swap_pmd(vmf. !! 4097                 barrier();
                                                   >> 4098                 if (unlikely(is_swap_pmd(orig_pmd))) {
5887                         VM_BUG_ON(thp_migrati    4099                         VM_BUG_ON(thp_migration_supported() &&
5888                                           !is !! 4100                                           !is_pmd_migration_entry(orig_pmd));
5889                         if (is_pmd_migration_ !! 4101                         if (is_pmd_migration_entry(orig_pmd))
5890                                 pmd_migration    4102                                 pmd_migration_entry_wait(mm, vmf.pmd);
5891                         return 0;                4103                         return 0;
5892                 }                                4104                 }
5893                 if (pmd_trans_huge(vmf.orig_p !! 4105                 if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
5894                         if (pmd_protnone(vmf. !! 4106                         if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
5895                                 return do_hug !! 4107                                 return do_huge_pmd_numa_page(&vmf, orig_pmd);
5896                                               !! 4108 
5897                         if ((flags & (FAULT_F !! 4109                         if (dirty && !pmd_write(orig_pmd)) {
5898                             !pmd_write(vmf.or !! 4110                                 ret = wp_huge_pmd(&vmf, orig_pmd);
5899                                 ret = wp_huge << 
5900                                 if (!(ret & V    4111                                 if (!(ret & VM_FAULT_FALLBACK))
5901                                         retur    4112                                         return ret;
5902                         } else {                 4113                         } else {
5903                                 huge_pmd_set_ !! 4114                                 huge_pmd_set_accessed(&vmf, orig_pmd);
5904                                 return 0;        4115                                 return 0;
5905                         }                        4116                         }
5906                 }                                4117                 }
5907         }                                        4118         }
5908                                                  4119 
5909         return handle_pte_fault(&vmf);           4120         return handle_pte_fault(&vmf);
5910 }                                                4121 }
5911                                                  4122 
5912 /**                                           << 
5913  * mm_account_fault - Do page fault accountin << 
5914  * @mm: mm from which memcg should be extract << 
5915  * @regs: the pt_regs struct pointer.  When s << 
5916  *        of perf event counters, but we'll s << 
5917  *        the task who triggered this page fa << 
5918  * @address: the faulted address.             << 
5919  * @flags: the fault flags.                   << 
5920  * @ret: the fault retcode.                   << 
5921  *                                            << 
5922  * This will take care of most of the page fa << 
5923  * will also include the PERF_COUNT_SW_PAGE_F << 
5924  * updates.  However, note that the handling  << 
5925  * still be in per-arch page fault handlers a << 
5926  */                                           << 
5927 static inline void mm_account_fault(struct mm << 
5928                                     unsigned  << 
5929                                     vm_fault_ << 
5930 {                                             << 
5931         bool major;                           << 
5932                                               << 
5933         /* Incomplete faults will be accounte << 
5934         if (ret & VM_FAULT_RETRY)             << 
5935                 return;                       << 
5936                                               << 
5937         /*                                    << 
5938          * To preserve the behavior of older  << 
5939          * both successful and failed faults, << 
5940          * which ignore failed cases.         << 
5941          */                                   << 
5942         count_vm_event(PGFAULT);              << 
5943         count_memcg_event_mm(mm, PGFAULT);    << 
5944                                               << 
5945         /*                                    << 
5946          * Do not account for unsuccessful fa << 
5947          * valid).  That includes arch_vma_ac << 
5948          * reaching here. So this is not a "t << 
5949          * counter.  We should use the hw pro << 
5950          */                                   << 
5951         if (ret & VM_FAULT_ERROR)             << 
5952                 return;                       << 
5953                                               << 
5954         /*                                    << 
5955          * We define the fault as a major fau << 
5956          * is VM_FAULT_MAJOR, or if it retrie << 
5957          * handle it immediately previously). << 
5958          */                                   << 
5959         major = (ret & VM_FAULT_MAJOR) || (fl << 
5960                                               << 
5961         if (major)                            << 
5962                 current->maj_flt++;           << 
5963         else                                  << 
5964                 current->min_flt++;           << 
5965                                               << 
5966         /*                                    << 
5967          * If the fault is done for GUP, regs << 
5968          * accounting for the per thread faul << 
5969          * fault, and we skip the perf event  << 
5970          */                                   << 
5971         if (!regs)                            << 
5972                 return;                       << 
5973                                               << 
5974         if (major)                            << 
5975                 perf_sw_event(PERF_COUNT_SW_P << 
5976         else                                  << 
5977                 perf_sw_event(PERF_COUNT_SW_P << 
5978 }                                             << 
5979                                               << 
5980 #ifdef CONFIG_LRU_GEN                         << 
5981 static void lru_gen_enter_fault(struct vm_are << 
5982 {                                             << 
5983         /* the LRU algorithm only applies to  << 
5984         current->in_lru_fault = vma_has_recen << 
5985 }                                             << 
5986                                               << 
5987 static void lru_gen_exit_fault(void)          << 
5988 {                                             << 
5989         current->in_lru_fault = false;        << 
5990 }                                             << 
5991 #else                                         << 
5992 static void lru_gen_enter_fault(struct vm_are << 
5993 {                                             << 
5994 }                                             << 
5995                                               << 
5996 static void lru_gen_exit_fault(void)          << 
5997 {                                             << 
5998 }                                             << 
5999 #endif /* CONFIG_LRU_GEN */                   << 
6000                                               << 
6001 static vm_fault_t sanitize_fault_flags(struct << 
6002                                        unsign << 
6003 {                                             << 
6004         if (unlikely(*flags & FAULT_FLAG_UNSH << 
6005                 if (WARN_ON_ONCE(*flags & FAU << 
6006                         return VM_FAULT_SIGSE << 
6007                 /*                            << 
6008                  * FAULT_FLAG_UNSHARE only ap << 
6009                  * just treat it like an ordi << 
6010                  */                           << 
6011                 if (!is_cow_mapping(vma->vm_f << 
6012                         *flags &= ~FAULT_FLAG << 
6013         } else if (*flags & FAULT_FLAG_WRITE) << 
6014                 /* Write faults on read-only  << 
6015                 if (WARN_ON_ONCE(!(vma->vm_fl << 
6016                         return VM_FAULT_SIGSE << 
6017                 /* ... and FOLL_FORCE only ap << 
6018                 if (WARN_ON_ONCE(!(vma->vm_fl << 
6019                                  !is_cow_mapp << 
6020                         return VM_FAULT_SIGSE << 
6021         }                                     << 
6022 #ifdef CONFIG_PER_VMA_LOCK                    << 
6023         /*                                    << 
6024          * Per-VMA locks can't be used with F << 
6025          * the assumption that lock is droppe << 
6026          */                                   << 
6027         if (WARN_ON_ONCE((*flags &            << 
6028                         (FAULT_FLAG_VMA_LOCK  << 
6029                         (FAULT_FLAG_VMA_LOCK  << 
6030                 return VM_FAULT_SIGSEGV;      << 
6031 #endif                                        << 
6032                                               << 
6033         return 0;                             << 
6034 }                                             << 
6035                                               << 
6036 /*                                               4123 /*
6037  * By the time we get here, we already hold t    4124  * By the time we get here, we already hold the mm semaphore
6038  *                                               4125  *
6039  * The mmap_lock may have been released depen !! 4126  * The mmap_sem may have been released depending on flags and our
6040  * return value.  See filemap_fault() and __f !! 4127  * return value.  See filemap_fault() and __lock_page_or_retry().
6041  */                                              4128  */
6042 vm_fault_t handle_mm_fault(struct vm_area_str !! 4129 int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
6043                            unsigned int flags !! 4130                 unsigned int flags)
6044 {                                                4131 {
6045         /* If the fault handler drops the mma !! 4132         int ret;
6046         struct mm_struct *mm = vma->vm_mm;    << 
6047         vm_fault_t ret;                       << 
6048         bool is_droppable;                    << 
6049                                                  4133 
6050         __set_current_state(TASK_RUNNING);       4134         __set_current_state(TASK_RUNNING);
6051                                                  4135 
6052         ret = sanitize_fault_flags(vma, &flag !! 4136         count_vm_event(PGFAULT);
6053         if (ret)                              !! 4137         count_memcg_event_mm(vma->vm_mm, PGFAULT);
6054                 goto out;                     !! 4138 
                                                   >> 4139         /* do counter updates before entering really critical section. */
                                                   >> 4140         check_sync_rss_stat(current);
6055                                                  4141 
6056         if (!arch_vma_access_permitted(vma, f    4142         if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
6057                                             f    4143                                             flags & FAULT_FLAG_INSTRUCTION,
6058                                             f !! 4144                                             flags & FAULT_FLAG_REMOTE))
6059                 ret = VM_FAULT_SIGSEGV;       !! 4145                 return VM_FAULT_SIGSEGV;
6060                 goto out;                     << 
6061         }                                     << 
6062                                               << 
6063         is_droppable = !!(vma->vm_flags & VM_ << 
6064                                                  4146 
6065         /*                                       4147         /*
6066          * Enable the memcg OOM handling for     4148          * Enable the memcg OOM handling for faults triggered in user
6067          * space.  Kernel faults are handled     4149          * space.  Kernel faults are handled more gracefully.
6068          */                                      4150          */
6069         if (flags & FAULT_FLAG_USER)             4151         if (flags & FAULT_FLAG_USER)
6070                 mem_cgroup_enter_user_fault() !! 4152                 mem_cgroup_oom_enable();
6071                                               << 
6072         lru_gen_enter_fault(vma);             << 
6073                                                  4153 
6074         if (unlikely(is_vm_hugetlb_page(vma))    4154         if (unlikely(is_vm_hugetlb_page(vma)))
6075                 ret = hugetlb_fault(vma->vm_m    4155                 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
6076         else                                     4156         else
6077                 ret = __handle_mm_fault(vma,     4157                 ret = __handle_mm_fault(vma, address, flags);
6078                                                  4158 
6079         /*                                    << 
6080          * Warning: It is no longer safe to d << 
6081          * because mmap_lock might have been  << 
6082          * vma might be destroyed from undern << 
6083          */                                   << 
6084                                               << 
6085         lru_gen_exit_fault();                 << 
6086                                               << 
6087         /* If the mapping is droppable, then  << 
6088         if (is_droppable)                     << 
6089                 ret &= ~VM_FAULT_OOM;         << 
6090                                               << 
6091         if (flags & FAULT_FLAG_USER) {           4159         if (flags & FAULT_FLAG_USER) {
6092                 mem_cgroup_exit_user_fault(); !! 4160                 mem_cgroup_oom_disable();
6093                 /*                               4161                 /*
6094                  * The task may have entered     4162                  * The task may have entered a memcg OOM situation but
6095                  * if the allocation error wa    4163                  * if the allocation error was handled gracefully (no
6096                  * VM_FAULT_OOM), there is no    4164                  * VM_FAULT_OOM), there is no need to kill anything.
6097                  * Just clean up the OOM stat    4165                  * Just clean up the OOM state peacefully.
6098                  */                              4166                  */
6099                 if (task_in_memcg_oom(current    4167                 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
6100                         mem_cgroup_oom_synchr    4168                         mem_cgroup_oom_synchronize(false);
6101         }                                        4169         }
6102 out:                                          << 
6103         mm_account_fault(mm, regs, address, f << 
6104                                                  4170 
6105         return ret;                              4171         return ret;
6106 }                                                4172 }
6107 EXPORT_SYMBOL_GPL(handle_mm_fault);              4173 EXPORT_SYMBOL_GPL(handle_mm_fault);
6108                                                  4174 
6109 #ifdef CONFIG_LOCK_MM_AND_FIND_VMA            << 
6110 #include <linux/extable.h>                    << 
6111                                               << 
6112 static inline bool get_mmap_lock_carefully(st << 
6113 {                                             << 
6114         if (likely(mmap_read_trylock(mm)))    << 
6115                 return true;                  << 
6116                                               << 
6117         if (regs && !user_mode(regs)) {       << 
6118                 unsigned long ip = exception_ << 
6119                 if (!search_exception_tables( << 
6120                         return false;         << 
6121         }                                     << 
6122                                               << 
6123         return !mmap_read_lock_killable(mm);  << 
6124 }                                             << 
6125                                               << 
6126 static inline bool mmap_upgrade_trylock(struc << 
6127 {                                             << 
6128         /*                                    << 
6129          * We don't have this operation yet.  << 
6130          *                                    << 
6131          * It should be easy enough to do: it << 
6132          *    atomic_long_try_cmpxchg_acquire << 
6133          * from RWSEM_READER_BIAS -> RWSEM_WR << 
6134          * it also needs the proper lockdep m << 
6135          */                                   << 
6136         return false;                         << 
6137 }                                             << 
6138                                               << 
6139 static inline bool upgrade_mmap_lock_carefull << 
6140 {                                             << 
6141         mmap_read_unlock(mm);                 << 
6142         if (regs && !user_mode(regs)) {       << 
6143                 unsigned long ip = exception_ << 
6144                 if (!search_exception_tables( << 
6145                         return false;         << 
6146         }                                     << 
6147         return !mmap_write_lock_killable(mm); << 
6148 }                                             << 
6149                                               << 
6150 /*                                            << 
6151  * Helper for page fault handling.            << 
6152  *                                            << 
6153  * This is kind of equivalent to "mmap_read_l << 
6154  * by "find_extend_vma()", except it's a lot  << 
6155  * the locking (and will drop the lock on fai << 
6156  *                                            << 
6157  * For example, if we have a kernel bug that  << 
6158  * fault, we don't want to just use mmap_read << 
6159  * the mm lock, because that would deadlock i << 
6160  * to happen while we're holding the mm lock  << 
6161  *                                            << 
6162  * So this checks the exception tables on ker << 
6163  * order to only do this all for instructions << 
6164  * expected to fault.                         << 
6165  *                                            << 
6166  * We can also actually take the mm lock for  << 
6167  * need to extend the vma, which helps the VM << 
6168  */                                           << 
6169 struct vm_area_struct *lock_mm_and_find_vma(s << 
6170                         unsigned long addr, s << 
6171 {                                             << 
6172         struct vm_area_struct *vma;           << 
6173                                               << 
6174         if (!get_mmap_lock_carefully(mm, regs << 
6175                 return NULL;                  << 
6176                                               << 
6177         vma = find_vma(mm, addr);             << 
6178         if (likely(vma && (vma->vm_start <= a << 
6179                 return vma;                   << 
6180                                               << 
6181         /*                                    << 
6182          * Well, dang. We might still be succ << 
6183          * if we can extend a vma to do so.   << 
6184          */                                   << 
6185         if (!vma || !(vma->vm_flags & VM_GROW << 
6186                 mmap_read_unlock(mm);         << 
6187                 return NULL;                  << 
6188         }                                     << 
6189                                               << 
6190         /*                                    << 
6191          * We can try to upgrade the mmap loc << 
6192          * in which case we can continue to u << 
6193          * we already looked up.              << 
6194          *                                    << 
6195          * Otherwise we'll have to drop the m << 
6196          * re-take it, and also look up the v << 
6197          * re-checking it.                    << 
6198          */                                   << 
6199         if (!mmap_upgrade_trylock(mm)) {      << 
6200                 if (!upgrade_mmap_lock_carefu << 
6201                         return NULL;          << 
6202                                               << 
6203                 vma = find_vma(mm, addr);     << 
6204                 if (!vma)                     << 
6205                         goto fail;            << 
6206                 if (vma->vm_start <= addr)    << 
6207                         goto success;         << 
6208                 if (!(vma->vm_flags & VM_GROW << 
6209                         goto fail;            << 
6210         }                                     << 
6211                                               << 
6212         if (expand_stack_locked(vma, addr))   << 
6213                 goto fail;                    << 
6214                                               << 
6215 success:                                      << 
6216         mmap_write_downgrade(mm);             << 
6217         return vma;                           << 
6218                                               << 
6219 fail:                                         << 
6220         mmap_write_unlock(mm);                << 
6221         return NULL;                          << 
6222 }                                             << 
6223 #endif                                        << 
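/*
 * Illustrative sketch (not part of memory.c): roughly how an architecture's
 * fault handler is expected to pair lock_mm_and_find_vma() with
 * handle_mm_fault().  arch_handle_user_fault() and its error path are
 * placeholders for this example; real callers (e.g. x86's
 * do_user_addr_fault()) add access checks, retry loops and signal handling.
 */
#include <linux/mm.h>
#include <linux/sched/signal.h>

static void arch_handle_user_fault(struct pt_regs *regs, struct mm_struct *mm,
				   unsigned long address, unsigned int flags)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	/* On success the mmap_lock is held for read, even after a stack grow. */
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (unlikely(!vma)) {
		/* No usable VMA; the lock has already been dropped. */
		force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address);
		return;
	}

	fault = handle_mm_fault(vma, address, flags, regs);

	/* On RETRY/COMPLETED the fault path dropped the lock itself. */
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		mmap_read_unlock(mm);
}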
6224                                               << 
6225 #ifdef CONFIG_PER_VMA_LOCK                    << 
6226 /*                                            << 
6227  * Lookup and lock a VMA under RCU protection << 
6228  * stable and not isolated. If the VMA is not << 
6229  * function returns NULL.                     << 
6230  */                                           << 
6231 struct vm_area_struct *lock_vma_under_rcu(str << 
6232                                           uns << 
6233 {                                             << 
6234         MA_STATE(mas, &mm->mm_mt, address, ad << 
6235         struct vm_area_struct *vma;           << 
6236                                               << 
6237         rcu_read_lock();                      << 
6238 retry:                                        << 
6239         vma = mas_walk(&mas);                 << 
6240         if (!vma)                             << 
6241                 goto inval;                   << 
6242                                               << 
6243         if (!vma_start_read(vma))             << 
6244                 goto inval;                   << 
6245                                               << 
6246         /* Check if the VMA got isolated afte << 
6247         if (vma->detached) {                  << 
6248                 vma_end_read(vma);            << 
6249                 count_vm_vma_lock_event(VMA_L << 
6250                 /* The area was replaced with << 
6251                 goto retry;                   << 
6252         }                                     << 
6253         /*                                    << 
6254          * At this point, we have a stable re << 
6255          * locked and we know it hasn't alrea << 
6256          * From here on, we can access the VM << 
6257          * fields are accessible for RCU read << 
6258          */                                   << 
6259                                               << 
6260         /* Check since vm_start/vm_end might  << 
6261         if (unlikely(address < vma->vm_start  << 
6262                 goto inval_end_read;          << 
6263                                               << 
6264         rcu_read_unlock();                    << 
6265         return vma;                           << 
6266                                               << 
6267 inval_end_read:                               << 
6268         vma_end_read(vma);                    << 
6269 inval:                                        << 
6270         rcu_read_unlock();                    << 
6271         count_vm_vma_lock_event(VMA_LOCK_ABOR << 
6272         return NULL;                          << 
6273 }                                             << 
6274 #endif /* CONFIG_PER_VMA_LOCK */              << 
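/*
 * Illustrative sketch (not part of memory.c): the per-VMA lock fast path as
 * an arch fault handler might use it.  try_vma_locked_fault() is a made-up
 * helper name; the assumption is that the caller falls back to the mmap_lock
 * path whenever the lockless lookup fails or the fault must be retried.
 */
#include <linux/mm.h>

static vm_fault_t try_vma_locked_fault(struct mm_struct *mm,
				       unsigned long address,
				       unsigned int flags,
				       struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		return VM_FAULT_RETRY;	/* fall back to taking mmap_lock */

	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	/* On RETRY/COMPLETED the fault path has already released the VMA lock. */
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);
	return fault;
}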
6275                                               << 
6276 #ifndef __PAGETABLE_P4D_FOLDED                   4175 #ifndef __PAGETABLE_P4D_FOLDED
6277 /*                                               4176 /*
6278  * Allocate p4d page table.                      4177  * Allocate p4d page table.
6279  * We've already handled the fast-path in-lin    4178  * We've already handled the fast-path in-line.
6280  */                                              4179  */
6281 int __p4d_alloc(struct mm_struct *mm, pgd_t *    4180 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
6282 {                                                4181 {
6283         p4d_t *new = p4d_alloc_one(mm, addres    4182         p4d_t *new = p4d_alloc_one(mm, address);
6284         if (!new)                                4183         if (!new)
6285                 return -ENOMEM;                  4184                 return -ENOMEM;
6286                                                  4185 
                                                   >> 4186         smp_wmb(); /* See comment in __pte_alloc */
                                                   >> 4187 
6287         spin_lock(&mm->page_table_lock);         4188         spin_lock(&mm->page_table_lock);
6288         if (pgd_present(*pgd)) {        /* An !! 4189         if (pgd_present(*pgd))          /* Another has populated it */
6289                 p4d_free(mm, new);               4190                 p4d_free(mm, new);
6290         } else {                              !! 4191         else
6291                 smp_wmb(); /* See comment in  << 
6292                 pgd_populate(mm, pgd, new);      4192                 pgd_populate(mm, pgd, new);
6293         }                                     << 
6294         spin_unlock(&mm->page_table_lock);       4193         spin_unlock(&mm->page_table_lock);
6295         return 0;                                4194         return 0;
6296 }                                                4195 }
6297 #endif /* __PAGETABLE_P4D_FOLDED */              4196 #endif /* __PAGETABLE_P4D_FOLDED */
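/*
 * For reference, a sketch of the "fast-path in-line" that the comment above
 * refers to: the p4d_alloc() wrapper in <linux/mm.h> only drops into
 * __p4d_alloc() when the pgd entry is still empty, roughly as follows.
 * The pud_alloc()/pmd_alloc() wrappers for the functions below follow the
 * same pattern one level down.
 */
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
			       unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}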
6298                                                  4197 
6299 #ifndef __PAGETABLE_PUD_FOLDED                   4198 #ifndef __PAGETABLE_PUD_FOLDED
6300 /*                                               4199 /*
6301  * Allocate page upper directory.                4200  * Allocate page upper directory.
6302  * We've already handled the fast-path in-lin    4201  * We've already handled the fast-path in-line.
6303  */                                              4202  */
6304 int __pud_alloc(struct mm_struct *mm, p4d_t *    4203 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
6305 {                                                4204 {
6306         pud_t *new = pud_alloc_one(mm, addres    4205         pud_t *new = pud_alloc_one(mm, address);
6307         if (!new)                                4206         if (!new)
6308                 return -ENOMEM;                  4207                 return -ENOMEM;
6309                                                  4208 
                                                   >> 4209         smp_wmb(); /* See comment in __pte_alloc */
                                                   >> 4210 
6310         spin_lock(&mm->page_table_lock);         4211         spin_lock(&mm->page_table_lock);
                                                   >> 4212 #ifndef __ARCH_HAS_5LEVEL_HACK
6311         if (!p4d_present(*p4d)) {                4213         if (!p4d_present(*p4d)) {
6312                 mm_inc_nr_puds(mm);              4214                 mm_inc_nr_puds(mm);
6313                 smp_wmb(); /* See comment in  << 
6314                 p4d_populate(mm, p4d, new);      4215                 p4d_populate(mm, p4d, new);
6315         } else  /* Another has populated it *    4216         } else  /* Another has populated it */
6316                 pud_free(mm, new);               4217                 pud_free(mm, new);
                                                   >> 4218 #else
                                                   >> 4219         if (!pgd_present(*p4d)) {
                                                   >> 4220                 mm_inc_nr_puds(mm);
                                                   >> 4221                 pgd_populate(mm, p4d, new);
                                                   >> 4222         } else  /* Another has populated it */
                                                   >> 4223                 pud_free(mm, new);
                                                   >> 4224 #endif /* __ARCH_HAS_5LEVEL_HACK */
6317         spin_unlock(&mm->page_table_lock);       4225         spin_unlock(&mm->page_table_lock);
6318         return 0;                                4226         return 0;
6319 }                                                4227 }
6320 #endif /* __PAGETABLE_PUD_FOLDED */              4228 #endif /* __PAGETABLE_PUD_FOLDED */
6321                                                  4229 
6322 #ifndef __PAGETABLE_PMD_FOLDED                   4230 #ifndef __PAGETABLE_PMD_FOLDED
6323 /*                                               4231 /*
6324  * Allocate page middle directory.               4232  * Allocate page middle directory.
6325  * We've already handled the fast-path in-lin    4233  * We've already handled the fast-path in-line.
6326  */                                              4234  */
6327 int __pmd_alloc(struct mm_struct *mm, pud_t *    4235 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
6328 {                                                4236 {
6329         spinlock_t *ptl;                         4237         spinlock_t *ptl;
6330         pmd_t *new = pmd_alloc_one(mm, addres    4238         pmd_t *new = pmd_alloc_one(mm, address);
6331         if (!new)                                4239         if (!new)
6332                 return -ENOMEM;                  4240                 return -ENOMEM;
6333                                                  4241 
                                                   >> 4242         smp_wmb(); /* See comment in __pte_alloc */
                                                   >> 4243 
6334         ptl = pud_lock(mm, pud);                 4244         ptl = pud_lock(mm, pud);
                                                   >> 4245 #ifndef __ARCH_HAS_4LEVEL_HACK
6335         if (!pud_present(*pud)) {                4246         if (!pud_present(*pud)) {
6336                 mm_inc_nr_pmds(mm);              4247                 mm_inc_nr_pmds(mm);
6337                 smp_wmb(); /* See comment in  << 
6338                 pud_populate(mm, pud, new);      4248                 pud_populate(mm, pud, new);
6339         } else {        /* Another has popula !! 4249         } else  /* Another has populated it */
6340                 pmd_free(mm, new);               4250                 pmd_free(mm, new);
6341         }                                     !! 4251 #else
                                                   >> 4252         if (!pgd_present(*pud)) {
                                                   >> 4253                 mm_inc_nr_pmds(mm);
                                                   >> 4254                 pgd_populate(mm, pud, new);
                                                   >> 4255         } else /* Another has populated it */
                                                   >> 4256                 pmd_free(mm, new);
                                                   >> 4257 #endif /* __ARCH_HAS_4LEVEL_HACK */
6342         spin_unlock(ptl);                        4258         spin_unlock(ptl);
6343         return 0;                                4259         return 0;
6344 }                                                4260 }
6345 #endif /* __PAGETABLE_PMD_FOLDED */              4261 #endif /* __PAGETABLE_PMD_FOLDED */
6346                                                  4262 
6347 static inline void pfnmap_args_setup(struct f !! 4263 static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
6348                                      spinlock !! 4264                             unsigned long *start, unsigned long *end,
6349                                      pgprot_t !! 4265                             pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
6350                                      unsigned << 
6351                                      bool spe << 
6352 {                                             << 
6353         args->lock = lock;                    << 
6354         args->ptep = ptep;                    << 
6355         args->pfn = pfn_base + ((args->addres << 
6356         args->pgprot = pgprot;                << 
6357         args->writable = writable;            << 
6358         args->special = special;              << 
6359 }                                             << 
6360                                               << 
6361 static inline void pfnmap_lockdep_assert(stru << 
6362 {                                             << 
6363 #ifdef CONFIG_LOCKDEP                         << 
6364         struct file *file = vma->vm_file;     << 
6365         struct address_space *mapping = file  << 
6366                                               << 
6367         if (mapping)                          << 
6368                 lockdep_assert(lockdep_is_hel << 
6369                                lockdep_is_hel << 
6370         else                                  << 
6371                 lockdep_assert(lockdep_is_hel << 
6372 #endif                                        << 
6373 }                                             << 
6374                                               << 
6375 /**                                           << 
6376  * follow_pfnmap_start() - Look up a pfn mapp << 
6377  * @args: Pointer to struct @follow_pfnmap_ar << 
6378  *                                            << 
6379  * The caller needs to setup args->vma and ar << 
6380  * virtual address as the target of such look << 
6381  * the results will be put into other output  << 
6382  *                                            << 
6383  * After the caller finished using the fields << 
6384  * another follow_pfnmap_end() to proper rele << 
6385  * of such look up request.                   << 
6386  *                                            << 
6387  * During the start() and end() calls, the re << 
6388  * as proper locks will be held.  After the e << 
6389  * in @follow_pfnmap_args will be invalid to  << 
6390  * use of such information after end() may re << 
6391  * by the caller with page table updates, oth << 
6392  * security bug.                              << 
6393  *                                            << 
6394  * If the PTE maps a refcounted page, callers << 
6395  * against invalidation with MMU notifiers; o << 
6396  * a later point in time can trigger use-afte << 
6397  *                                            << 
6398  * Only IO mappings and raw PFN mappings are  << 
6399  * should be taken for read, and the mmap sem << 
6400  * before the end() is invoked.               << 
6401  *                                            << 
6402  * This function must not be used to modify P << 
6403  *                                            << 
6404  * Return: zero on success, negative otherwis << 
6405  */                                           << 
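/*
 * Illustrative sketch (not part of memory.c): the intended pairing of
 * follow_pfnmap_start() and follow_pfnmap_end().  The helper below and its
 * caller-side locking are assumptions for this example; per the rules above,
 * the mmap lock (or an equivalent rmap lock) must be held across both calls,
 * and args.pfn must not be used after follow_pfnmap_end().
 */
#include <linux/mm.h>

static int example_read_pfn(struct vm_area_struct *vma, unsigned long addr,
			    unsigned long *pfn)
{
	struct follow_pfnmap_args args = {
		.vma = vma,
		.address = addr,
	};
	int err;

	err = follow_pfnmap_start(&args);
	if (err)
		return err;

	*pfn = args.pfn;	/* valid only until follow_pfnmap_end() */
	follow_pfnmap_end(&args);
	return 0;
}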
6406 int follow_pfnmap_start(struct follow_pfnmap_ << 
6407 {                                                4266 {
6408         struct vm_area_struct *vma = args->vm !! 4267         pgd_t *pgd;
6409         unsigned long address = args->address !! 4268         p4d_t *p4d;
6410         struct mm_struct *mm = vma->vm_mm;    !! 4269         pud_t *pud;
6411         spinlock_t *lock;                     !! 4270         pmd_t *pmd;
6412         pgd_t *pgdp;                          !! 4271         pte_t *ptep;
6413         p4d_t *p4dp, p4d;                     << 
6414         pud_t *pudp, pud;                     << 
6415         pmd_t *pmdp, pmd;                     << 
6416         pte_t *ptep, pte;                     << 
6417                                               << 
6418         pfnmap_lockdep_assert(vma);           << 
6419                                                  4272 
6420         if (unlikely(address < vma->vm_start  !! 4273         pgd = pgd_offset(mm, address);
                                                   >> 4274         if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
6421                 goto out;                        4275                 goto out;
6422                                                  4276 
6423         if (!(vma->vm_flags & (VM_IO | VM_PFN !! 4277         p4d = p4d_offset(pgd, address);
6424                 goto out;                     !! 4278         if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
6425 retry:                                        << 
6426         pgdp = pgd_offset(mm, address);       << 
6427         if (pgd_none(*pgdp) || unlikely(pgd_b << 
6428                 goto out;                        4279                 goto out;
6429                                                  4280 
6430         p4dp = p4d_offset(pgdp, address);     !! 4281         pud = pud_offset(p4d, address);
6431         p4d = READ_ONCE(*p4dp);               !! 4282         if (pud_none(*pud) || unlikely(pud_bad(*pud)))
6432         if (p4d_none(p4d) || unlikely(p4d_bad << 
6433                 goto out;                        4283                 goto out;
6434                                                  4284 
6435         pudp = pud_offset(p4dp, address);     !! 4285         pmd = pmd_offset(pud, address);
6436         pud = READ_ONCE(*pudp);               !! 4286         VM_BUG_ON(pmd_trans_huge(*pmd));
6437         if (pud_none(pud))                    << 
6438                 goto out;                     << 
6439         if (pud_leaf(pud)) {                  << 
6440                 lock = pud_lock(mm, pudp);    << 
6441                 if (!unlikely(pud_leaf(pud))) << 
6442                         spin_unlock(lock);    << 
6443                         goto retry;           << 
6444                 }                             << 
6445                 pfnmap_args_setup(args, lock, << 
6446                                   pud_pfn(pud << 
6447                                   pud_special << 
6448                 return 0;                     << 
6449         }                                     << 
6450                                                  4287 
6451         pmdp = pmd_offset(pudp, address);     !! 4288         if (pmd_huge(*pmd)) {
6452         pmd = pmdp_get_lockless(pmdp);        !! 4289                 if (!pmdpp)
6453         if (pmd_leaf(pmd)) {                  !! 4290                         goto out;
6454                 lock = pmd_lock(mm, pmdp);    !! 4291 
6455                 if (!unlikely(pmd_leaf(pmd))) !! 4292                 if (start && end) {
6456                         spin_unlock(lock);    !! 4293                         *start = address & PMD_MASK;
6457                         goto retry;           !! 4294                         *end = *start + PMD_SIZE;
6458                 }                             !! 4295                         mmu_notifier_invalidate_range_start(mm, *start, *end);
6459                 pfnmap_args_setup(args, lock, !! 4296                 }
6460                                   pmd_pfn(pmd !! 4297                 *ptlp = pmd_lock(mm, pmd);
6461                                   pmd_special !! 4298                 if (pmd_huge(*pmd)) {
6462                 return 0;                     !! 4299                         *pmdpp = pmd;
                                                   >> 4300                         return 0;
                                                   >> 4301                 }
                                                   >> 4302                 spin_unlock(*ptlp);
                                                   >> 4303                 if (start && end)
                                                   >> 4304                         mmu_notifier_invalidate_range_end(mm, *start, *end);
6463         }                                        4305         }
6464                                                  4306 
6465         ptep = pte_offset_map_lock(mm, pmdp,  !! 4307         if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
6466         if (!ptep)                            << 
6467                 goto out;                        4308                 goto out;
6468         pte = ptep_get(ptep);                 !! 4309 
6469         if (!pte_present(pte))                !! 4310         if (start && end) {
                                                   >> 4311                 *start = address & PAGE_MASK;
                                                   >> 4312                 *end = *start + PAGE_SIZE;
                                                   >> 4313                 mmu_notifier_invalidate_range_start(mm, *start, *end);
                                                   >> 4314         }
                                                   >> 4315         ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
                                                   >> 4316         if (!pte_present(*ptep))
6470                 goto unlock;                     4317                 goto unlock;
6471         pfnmap_args_setup(args, lock, ptep, p !! 4318         *ptepp = ptep;
6472                           pte_pfn(pte), PAGE_ << 
6473                           pte_special(pte));  << 
6474         return 0;                                4319         return 0;
6475 unlock:                                          4320 unlock:
6476         pte_unmap_unlock(ptep, lock);         !! 4321         pte_unmap_unlock(ptep, *ptlp);
                                                   >> 4322         if (start && end)
                                                   >> 4323                 mmu_notifier_invalidate_range_end(mm, *start, *end);
6477 out:                                             4324 out:
6478         return -EINVAL;                          4325         return -EINVAL;
6479 }                                                4326 }
6480 EXPORT_SYMBOL_GPL(follow_pfnmap_start);       !! 4327 
                                                   >> 4328 static inline int follow_pte(struct mm_struct *mm, unsigned long address,
                                                   >> 4329                              pte_t **ptepp, spinlock_t **ptlp)
                                                   >> 4330 {
                                                   >> 4331         int res;
                                                   >> 4332 
                                                   >> 4333         /* (void) is needed to make gcc happy */
                                                   >> 4334         (void) __cond_lock(*ptlp,
                                                   >> 4335                            !(res = __follow_pte_pmd(mm, address, NULL, NULL,
                                                   >> 4336                                                     ptepp, NULL, ptlp)));
                                                   >> 4337         return res;
                                                   >> 4338 }
                                                   >> 4339 
                                                   >> 4340 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
                                                   >> 4341                              unsigned long *start, unsigned long *end,
                                                   >> 4342                              pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
                                                   >> 4343 {
                                                   >> 4344         int res;
                                                   >> 4345 
                                                   >> 4346         /* (void) is needed to make gcc happy */
                                                   >> 4347         (void) __cond_lock(*ptlp,
                                                   >> 4348                            !(res = __follow_pte_pmd(mm, address, start, end,
                                                   >> 4349                                                     ptepp, pmdpp, ptlp)));
                                                   >> 4350         return res;
                                                   >> 4351 }
                                                   >> 4352 EXPORT_SYMBOL(follow_pte_pmd);
6481                                                  4353 
6482 /**                                              4354 /**
6483  * follow_pfnmap_end(): End a follow_pfnmap_s !! 4355  * follow_pfn - look up PFN at a user virtual address
6484  * @args: Pointer to struct @follow_pfnmap_ar !! 4356  * @vma: memory mapping
                                                   >> 4357  * @address: user virtual address
                                                   >> 4358  * @pfn: location to store found PFN
                                                   >> 4359  *
                                                   >> 4360  * Only IO mappings and raw PFN mappings are allowed.
6485  *                                               4361  *
6486  * Must be used in pair of follow_pfnmap_star !! 4362  * Returns zero and the pfn at @pfn on success, -ve otherwise.
6487  * above for more information.                << 
6488  */                                              4363  */
6489 void follow_pfnmap_end(struct follow_pfnmap_a !! 4364 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
                                                   >> 4365         unsigned long *pfn)
6490 {                                                4366 {
6491         if (args->lock)                       !! 4367         int ret = -EINVAL;
6492                 spin_unlock(args->lock);      !! 4368         spinlock_t *ptl;
6493         if (args->ptep)                       !! 4369         pte_t *ptep;
6494                 pte_unmap(args->ptep);        !! 4370 
                                                   >> 4371         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
                                                   >> 4372                 return ret;
                                                   >> 4373 
                                                   >> 4374         ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
                                                   >> 4375         if (ret)
                                                   >> 4376                 return ret;
                                                   >> 4377         *pfn = pte_pfn(*ptep);
                                                   >> 4378         pte_unmap_unlock(ptep, ptl);
                                                   >> 4379         return 0;
6495 }                                                4380 }
6496 EXPORT_SYMBOL_GPL(follow_pfnmap_end);         !! 4381 EXPORT_SYMBOL(follow_pfn);
6497                                                  4382 
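To make the start()/end() contract documented above concrete, here is a minimal illustrative caller (not part of mm/memory.c; my_lookup_pfn is an invented name), assuming the caller already holds mmap_read_lock() on the target mm and the vma is VM_IO or VM_PFNMAP:

static int my_lookup_pfn(struct vm_area_struct *vma, unsigned long addr,
                         unsigned long *pfn, bool *writable)
{
        struct follow_pfnmap_args args = { .vma = vma, .address = addr };

        /* Caller must hold mmap_read_lock(vma->vm_mm) across start()..end(). */
        if (follow_pfnmap_start(&args))
                return -EINVAL;

        /* args.* is only valid until follow_pfnmap_end() drops the page table lock. */
        *pfn = args.pfn;
        *writable = args.writable;

        follow_pfnmap_end(&args);
        return 0;
}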
6498 #ifdef CONFIG_HAVE_IOREMAP_PROT                  4383 #ifdef CONFIG_HAVE_IOREMAP_PROT
6499 /**                                           !! 4384 int follow_phys(struct vm_area_struct *vma,
6500  * generic_access_phys - generic implementati !! 4385                 unsigned long address, unsigned int flags,
6501  * @vma: the vma to access                    !! 4386                 unsigned long *prot, resource_size_t *phys)
6502  * @addr: userspace address, not relative off !! 4387 {
6503  * @buf: buffer to read/write                 !! 4388         int ret = -EINVAL;
6504  * @len: length of transfer                   !! 4389         pte_t *ptep, pte;
6505  * @write: set to FOLL_WRITE when writing, ot !! 4390         spinlock_t *ptl;
6506  *                                            !! 4391 
6507  * This is a generic implementation for &vm_o !! 4392         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
6508  * iomem mapping. This callback is used by ac !! 4393                 goto out;
6509  * not page based.                            !! 4394 
6510  */                                           !! 4395         if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
                                                   >> 4396                 goto out;
                                                   >> 4397         pte = *ptep;
                                                   >> 4398 
                                                   >> 4399         if ((flags & FOLL_WRITE) && !pte_write(pte))
                                                   >> 4400                 goto unlock;
                                                   >> 4401 
                                                   >> 4402         *prot = pgprot_val(pte_pgprot(pte));
                                                   >> 4403         *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
                                                   >> 4404 
                                                   >> 4405         ret = 0;
                                                   >> 4406 unlock:
                                                   >> 4407         pte_unmap_unlock(ptep, ptl);
                                                   >> 4408 out:
                                                   >> 4409         return ret;
                                                   >> 4410 }
                                                   >> 4411 
6511 int generic_access_phys(struct vm_area_struct    4412 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
6512                         void *buf, int len, i    4413                         void *buf, int len, int write)
6513 {                                                4414 {
6514         resource_size_t phys_addr;               4415         resource_size_t phys_addr;
6515         unsigned long prot = 0;                  4416         unsigned long prot = 0;
6516         void __iomem *maddr;                     4417         void __iomem *maddr;
6517         int offset = offset_in_page(addr);    !! 4418         int offset = addr & (PAGE_SIZE-1);
6518         int ret = -EINVAL;                    << 
6519         bool writable;                        << 
6520         struct follow_pfnmap_args args = { .v << 
6521                                               << 
6522 retry:                                        << 
6523         if (follow_pfnmap_start(&args))       << 
6524                 return -EINVAL;               << 
6525         prot = pgprot_val(args.pgprot);       << 
6526         phys_addr = (resource_size_t)args.pfn << 
6527         writable = args.writable;             << 
6528         follow_pfnmap_end(&args);             << 
6529                                                  4419 
6530         if ((write & FOLL_WRITE) && !writable !! 4420         if (follow_phys(vma, addr, write, &prot, &phys_addr))
6531                 return -EINVAL;                  4421                 return -EINVAL;
6532                                                  4422 
6533         maddr = ioremap_prot(phys_addr, PAGE_    4423         maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
6534         if (!maddr)                              4424         if (!maddr)
6535                 return -ENOMEM;                  4425                 return -ENOMEM;
6536                                                  4426 
6537         if (follow_pfnmap_start(&args))       << 
6538                 goto out_unmap;               << 
6539                                               << 
6540         if ((prot != pgprot_val(args.pgprot)) << 
6541             (phys_addr != (args.pfn << PAGE_S << 
6542             (writable != args.writable)) {    << 
6543                 follow_pfnmap_end(&args);     << 
6544                 iounmap(maddr);               << 
6545                 goto retry;                   << 
6546         }                                     << 
6547                                               << 
6548         if (write)                               4427         if (write)
6549                 memcpy_toio(maddr + offset, b    4428                 memcpy_toio(maddr + offset, buf, len);
6550         else                                     4429         else
6551                 memcpy_fromio(buf, maddr + of    4430                 memcpy_fromio(buf, maddr + offset, len);
6552         ret = len;                            << 
6553         follow_pfnmap_end(&args);             << 
6554 out_unmap:                                    << 
6555         iounmap(maddr);                          4431         iounmap(maddr);
6556                                                  4432 
6557         return ret;                           !! 4433         return len;
6558 }                                                4434 }
6559 EXPORT_SYMBOL_GPL(generic_access_phys);          4435 EXPORT_SYMBOL_GPL(generic_access_phys);
6560 #endif                                           4436 #endif
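For context, generic_access_phys() is meant to be installed as a driver's vm_operations_struct ->access() callback so that access_process_vm() and ptrace can reach VM_IO/VM_PFNMAP mappings. A hedged sketch of such wiring (my_phys_vm_ops and my_driver_mmap are hypothetical names, not taken from this file):

#ifdef CONFIG_HAVE_IOREMAP_PROT
static const struct vm_operations_struct my_phys_vm_ops = {
        .access = generic_access_phys,
};

static int my_driver_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &my_phys_vm_ops;

        /* remap_pfn_range() marks the vma VM_IO | VM_PFNMAP for us. */
        return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
#endif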
6561                                                  4437 
6562 /*                                               4438 /*
6563  * Access another process' address space as g !! 4439  * Access another process' address space as given in mm.  If non-NULL, use the
                                                   >> 4440  * given task for page fault accounting.
6564  */                                              4441  */
6565 static int __access_remote_vm(struct mm_struc !! 4442 int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
6566                               void *buf, int  !! 4443                 unsigned long addr, void *buf, int len, unsigned int gup_flags)
6567 {                                                4444 {
                                                   >> 4445         struct vm_area_struct *vma;
6568         void *old_buf = buf;                     4446         void *old_buf = buf;
6569         int write = gup_flags & FOLL_WRITE;      4447         int write = gup_flags & FOLL_WRITE;
6570                                                  4448 
6571         if (mmap_read_lock_killable(mm))      !! 4449         down_read(&mm->mmap_sem);
6572                 return 0;                     << 
6573                                               << 
6574         /* Untag the address before looking u << 
6575         addr = untagged_addr_remote(mm, addr) << 
6576                                               << 
6577         /* Avoid triggering the temporary war << 
6578         if (!vma_lookup(mm, addr) && !expand_ << 
6579                 return 0;                     << 
6580                                               << 
6581         /* ignore errors, just check how much    4450         /* ignore errors, just check how much was successfully transferred */
6582         while (len) {                            4451         while (len) {
6583                 int bytes, offset;            !! 4452                 int bytes, ret, offset;
6584                 void *maddr;                     4453                 void *maddr;
6585                 struct vm_area_struct *vma =  !! 4454                 struct page *page = NULL;
6586                 struct page *page = get_user_ << 
6587                                               << 
6588                                               << 
6589                 if (IS_ERR(page)) {           << 
6590                         /* We might need to e << 
6591                         vma = vma_lookup(mm,  << 
6592                         if (!vma) {           << 
6593                                 vma = expand_ << 
6594                                               << 
6595                                 /* mmap_lock  << 
6596                                 if (!vma)     << 
6597                                         retur << 
6598                                               << 
6599                                 /* Try again  << 
6600                                 continue;     << 
6601                         }                     << 
6602                                                  4455 
                                                   >> 4456                 ret = get_user_pages_remote(tsk, mm, addr, 1,
                                                   >> 4457                                 gup_flags, &page, &vma, NULL);
                                                   >> 4458                 if (ret <= 0) {
                                                   >> 4459 #ifndef CONFIG_HAVE_IOREMAP_PROT
                                                   >> 4460                         break;
                                                   >> 4461 #else
6603                         /*                       4462                         /*
6604                          * Check if this is a    4463                          * Check if this is a VM_IO | VM_PFNMAP VMA, which
6605                          * we can access usin    4464                          * we can access using slightly different code.
6606                          */                      4465                          */
6607                         bytes = 0;            !! 4466                         vma = find_vma(mm, addr);
6608 #ifdef CONFIG_HAVE_IOREMAP_PROT               !! 4467                         if (!vma || vma->vm_start > addr)
                                                   >> 4468                                 break;
6609                         if (vma->vm_ops && vm    4469                         if (vma->vm_ops && vma->vm_ops->access)
6610                                 bytes = vma-> !! 4470                                 ret = vma->vm_ops->access(vma, addr, buf,
6611                                               !! 4471                                                           len, write);
6612 #endif                                        !! 4472                         if (ret <= 0)
6613                         if (bytes <= 0)       << 
6614                                 break;           4473                                 break;
                                                   >> 4474                         bytes = ret;
                                                   >> 4475 #endif
6615                 } else {                         4476                 } else {
6616                         bytes = len;             4477                         bytes = len;
6617                         offset = addr & (PAGE    4478                         offset = addr & (PAGE_SIZE-1);
6618                         if (bytes > PAGE_SIZE    4479                         if (bytes > PAGE_SIZE-offset)
6619                                 bytes = PAGE_    4480                                 bytes = PAGE_SIZE-offset;
6620                                                  4481 
6621                         maddr = kmap_local_pa !! 4482                         maddr = kmap(page);
6622                         if (write) {             4483                         if (write) {
6623                                 copy_to_user_    4484                                 copy_to_user_page(vma, page, addr,
6624                                                  4485                                                   maddr + offset, buf, bytes);
6625                                 set_page_dirt    4486                                 set_page_dirty_lock(page);
6626                         } else {                 4487                         } else {
6627                                 copy_from_use    4488                                 copy_from_user_page(vma, page, addr,
6628                                                  4489                                                     buf, maddr + offset, bytes);
6629                         }                        4490                         }
6630                         unmap_and_put_page(pa !! 4491                         kunmap(page);
                                                   >> 4492                         put_page(page);
6631                 }                                4493                 }
6632                 len -= bytes;                    4494                 len -= bytes;
6633                 buf += bytes;                    4495                 buf += bytes;
6634                 addr += bytes;                   4496                 addr += bytes;
6635         }                                        4497         }
6636         mmap_read_unlock(mm);                 !! 4498         up_read(&mm->mmap_sem);
6637                                                  4499 
6638         return buf - old_buf;                    4500         return buf - old_buf;
6639 }                                                4501 }
6640                                                  4502 
6641 /**                                              4503 /**
6642  * access_remote_vm - access another process'    4504  * access_remote_vm - access another process' address space
6643  * @mm:         the mm_struct of the target a    4505  * @mm:         the mm_struct of the target address space
6644  * @addr:       start address to access          4506  * @addr:       start address to access
6645  * @buf:        source or destination buffer     4507  * @buf:        source or destination buffer
6646  * @len:        number of bytes to transfer      4508  * @len:        number of bytes to transfer
6647  * @gup_flags:  flags modifying lookup behavi    4509  * @gup_flags:  flags modifying lookup behaviour
6648  *                                               4510  *
6649  * The caller must hold a reference on @mm.      4511  * The caller must hold a reference on @mm.
6650  *                                            << 
6651  * Return: number of bytes copied from source << 
6652  */                                              4512  */
6653 int access_remote_vm(struct mm_struct *mm, un    4513 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
6654                 void *buf, int len, unsigned     4514                 void *buf, int len, unsigned int gup_flags)
6655 {                                                4515 {
6656         return __access_remote_vm(mm, addr, b !! 4516         return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
6657 }                                                4517 }
6658                                                  4518 
6659 /*                                               4519 /*
6660  * Access another process' address space.        4520  * Access another process' address space.
6661  * Source/target buffer must be kernel space,    4521  * Source/target buffer must be kernel space,
6662  * Do not walk the page table directly, use g    4522  * Do not walk the page table directly, use get_user_pages
6663  */                                              4523  */
6664 int access_process_vm(struct task_struct *tsk    4524 int access_process_vm(struct task_struct *tsk, unsigned long addr,
6665                 void *buf, int len, unsigned     4525                 void *buf, int len, unsigned int gup_flags)
6666 {                                                4526 {
6667         struct mm_struct *mm;                    4527         struct mm_struct *mm;
6668         int ret;                                 4528         int ret;
6669                                                  4529 
6670         mm = get_task_mm(tsk);                   4530         mm = get_task_mm(tsk);
6671         if (!mm)                                 4531         if (!mm)
6672                 return 0;                        4532                 return 0;
6673                                                  4533 
6674         ret = __access_remote_vm(mm, addr, bu !! 4534         ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
6675                                                  4535 
6676         mmput(mm);                               4536         mmput(mm);
6677                                                  4537 
6678         return ret;                              4538         return ret;
6679 }                                                4539 }
6680 EXPORT_SYMBOL_GPL(access_process_vm);            4540 EXPORT_SYMBOL_GPL(access_process_vm);
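A short usage sketch for the exported helper above (my_peek_remote is a hypothetical wrapper): the return value is the number of bytes actually transferred, so a short count must be treated as failure; FOLL_FORCE mirrors what ptrace-style readers pass, and omitting FOLL_WRITE makes this a read:

static int my_peek_remote(struct task_struct *tsk, unsigned long addr,
                          void *kbuf, int len)
{
        int copied = access_process_vm(tsk, addr, kbuf, len, FOLL_FORCE);

        return copied == len ? 0 : -EFAULT;
}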
6681                                                  4541 
6682 /*                                               4542 /*
6683  * Print the name of a VMA.                      4543  * Print the name of a VMA.
6684  */                                              4544  */
6685 void print_vma_addr(char *prefix, unsigned lo    4545 void print_vma_addr(char *prefix, unsigned long ip)
6686 {                                                4546 {
6687         struct mm_struct *mm = current->mm;      4547         struct mm_struct *mm = current->mm;
6688         struct vm_area_struct *vma;              4548         struct vm_area_struct *vma;
6689                                                  4549 
6690         /*                                       4550         /*
6691          * we might be running from an atomic    4551          * we might be running from an atomic context so we cannot sleep
6692          */                                      4552          */
6693         if (!mmap_read_trylock(mm))           !! 4553         if (!down_read_trylock(&mm->mmap_sem))
6694                 return;                          4554                 return;
6695                                                  4555 
6696         vma = vma_lookup(mm, ip);             !! 4556         vma = find_vma(mm, ip);
6697         if (vma && vma->vm_file) {               4557         if (vma && vma->vm_file) {
6698                 struct file *f = vma->vm_file    4558                 struct file *f = vma->vm_file;
6699                 ip -= vma->vm_start;          !! 4559                 char *buf = (char *)__get_free_page(GFP_NOWAIT);
6700                 ip += vma->vm_pgoff << PAGE_S !! 4560                 if (buf) {
6701                 printk("%s%pD[%lx,%lx+%lx]",  !! 4561                         char *p;
6702                                 vma->vm_start !! 4562 
6703                                 vma->vm_end - !! 4563                         p = file_path(f, buf, PAGE_SIZE);
                                                   >> 4564                         if (IS_ERR(p))
                                                   >> 4565                                 p = "?";
                                                   >> 4566                         printk("%s%s[%lx+%lx]", prefix, kbasename(p),
                                                   >> 4567                                         vma->vm_start,
                                                   >> 4568                                         vma->vm_end - vma->vm_start);
                                                   >> 4569                         free_page((unsigned long)buf);
                                                   >> 4570                 }
6704         }                                        4571         }
6705         mmap_read_unlock(mm);                 !! 4572         up_read(&mm->mmap_sem);
6706 }                                                4573 }
6707                                                  4574 
6708 #if defined(CONFIG_PROVE_LOCKING) || defined(    4575 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
6709 void __might_fault(const char *file, int line    4576 void __might_fault(const char *file, int line)
6710 {                                                4577 {
                                                   >> 4578         /*
                                                   >> 4579          * Some code (nfs/sunrpc) uses socket ops on kernel memory while
                                                   >> 4580          * holding the mmap_sem, this is safe because kernel memory doesn't
                                                   >> 4581          * get paged out, therefore we'll never actually fault, and the
                                                   >> 4582          * below annotations will generate false positives.
                                                   >> 4583          */
                                                   >> 4584         if (uaccess_kernel())
                                                   >> 4585                 return;
6711         if (pagefault_disabled())                4586         if (pagefault_disabled())
6712                 return;                          4587                 return;
6713         __might_sleep(file, line);            !! 4588         __might_sleep(file, line, 0);
6714 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)           4589 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
6715         if (current->mm)                         4590         if (current->mm)
6716                 might_lock_read(&current->mm- !! 4591                 might_lock_read(&current->mm->mmap_sem);
6717 #endif                                           4592 #endif
6718 }                                                4593 }
6719 EXPORT_SYMBOL(__might_fault);                    4594 EXPORT_SYMBOL(__might_fault);
6720 #endif                                           4595 #endif
6721                                                  4596 
6722 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || d    4597 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
6723 /*                                            !! 4598 static void clear_gigantic_page(struct page *page,
6724  * Process all subpages of the specified huge !! 4599                                 unsigned long addr,
6725  * operation.  The target subpage will be pro !! 4600                                 unsigned int pages_per_huge_page)
6726  * cache lines hot.                           << 
6727  */                                           << 
6728 static inline int process_huge_page(          << 
6729         unsigned long addr_hint, unsigned int << 
6730         int (*process_subpage)(unsigned long  << 
6731         void *arg)                            << 
6732 {                                                4601 {
6733         int i, n, base, l, ret;               !! 4602         int i;
                                                   >> 4603         struct page *p = page;
                                                   >> 4604 
                                                   >> 4605         might_sleep();
                                                   >> 4606         for (i = 0; i < pages_per_huge_page;
                                                   >> 4607              i++, p = mem_map_next(p, page, i)) {
                                                   >> 4608                 cond_resched();
                                                   >> 4609                 clear_user_highpage(p, addr + i * PAGE_SIZE);
                                                   >> 4610         }
                                                   >> 4611 }
                                                   >> 4612 void clear_huge_page(struct page *page,
                                                   >> 4613                      unsigned long addr_hint, unsigned int pages_per_huge_page)
                                                   >> 4614 {
                                                   >> 4615         int i, n, base, l;
6734         unsigned long addr = addr_hint &         4616         unsigned long addr = addr_hint &
6735                 ~(((unsigned long)nr_pages << !! 4617                 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
6736                                                  4618 
6737         /* Process target subpage last to kee !! 4619         if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
                                                   >> 4620                 clear_gigantic_page(page, addr, pages_per_huge_page);
                                                   >> 4621                 return;
                                                   >> 4622         }
                                                   >> 4623 
                                                   >> 4624         /* Clear sub-page to access last to keep its cache lines hot */
6738         might_sleep();                           4625         might_sleep();
6739         n = (addr_hint - addr) / PAGE_SIZE;      4626         n = (addr_hint - addr) / PAGE_SIZE;
6740         if (2 * n <= nr_pages) {              !! 4627         if (2 * n <= pages_per_huge_page) {
6741                 /* If target subpage in first !! 4628                 /* If sub-page to access in first half of huge page */
6742                 base = 0;                        4629                 base = 0;
6743                 l = n;                           4630                 l = n;
6744                 /* Process subpages at the en !! 4631                 /* Clear sub-pages at the end of huge page */
6745                 for (i = nr_pages - 1; i >= 2 !! 4632                 for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
6746                         cond_resched();          4633                         cond_resched();
6747                         ret = process_subpage !! 4634                         clear_user_highpage(page + i, addr + i * PAGE_SIZE);
6748                         if (ret)              << 
6749                                 return ret;   << 
6750                 }                                4635                 }
6751         } else {                                 4636         } else {
6752                 /* If target subpage in secon !! 4637                 /* If sub-page to access in second half of huge page */
6753                 base = nr_pages - 2 * (nr_pag !! 4638                 base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
6754                 l = nr_pages - n;             !! 4639                 l = pages_per_huge_page - n;
6755                 /* Process subpages at the be !! 4640                 /* Clear sub-pages at the begin of huge page */
6756                 for (i = 0; i < base; i++) {     4641                 for (i = 0; i < base; i++) {
6757                         cond_resched();          4642                         cond_resched();
6758                         ret = process_subpage !! 4643                         clear_user_highpage(page + i, addr + i * PAGE_SIZE);
6759                         if (ret)              << 
6760                                 return ret;   << 
6761                 }                                4644                 }
6762         }                                        4645         }
6763         /*                                       4646         /*
6764          * Process remaining subpages in left !! 4647          * Clear remaining sub-pages in left-right-left-right pattern
6765          * towards the target subpage         !! 4648          * towards the sub-page to access
6766          */                                      4649          */
6767         for (i = 0; i < l; i++) {                4650         for (i = 0; i < l; i++) {
6768                 int left_idx = base + i;         4651                 int left_idx = base + i;
6769                 int right_idx = base + 2 * l     4652                 int right_idx = base + 2 * l - 1 - i;
6770                                                  4653 
6771                 cond_resched();                  4654                 cond_resched();
6772                 ret = process_subpage(addr +  !! 4655                 clear_user_highpage(page + left_idx,
6773                 if (ret)                      !! 4656                                     addr + left_idx * PAGE_SIZE);
6774                         return ret;           << 
6775                 cond_resched();                  4657                 cond_resched();
6776                 ret = process_subpage(addr +  !! 4658                 clear_user_highpage(page + right_idx,
6777                 if (ret)                      !! 4659                                     addr + right_idx * PAGE_SIZE);
6778                         return ret;           << 
6779         }                                        4660         }
6780         return 0;                             << 
6781 }                                                4661 }
6782                                                  4662 
6783 static void clear_gigantic_page(struct folio  !! 4663 static void copy_user_gigantic_page(struct page *dst, struct page *src,
6784                                 unsigned int  !! 4664                                     unsigned long addr,
                                                   >> 4665                                     struct vm_area_struct *vma,
                                                   >> 4666                                     unsigned int pages_per_huge_page)
6785 {                                                4667 {
6786         int i;                                   4668         int i;
                                                   >> 4669         struct page *dst_base = dst;
                                                   >> 4670         struct page *src_base = src;
6787                                                  4671 
6788         might_sleep();                        !! 4672         for (i = 0; i < pages_per_huge_page; ) {
6789         for (i = 0; i < nr_pages; i++) {      << 
6790                 cond_resched();                  4673                 cond_resched();
6791                 clear_user_highpage(folio_pag !! 4674                 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
6792         }                                     << 
6793 }                                             << 
6794                                               << 
6795 static int clear_subpage(unsigned long addr,  << 
6796 {                                             << 
6797         struct folio *folio = arg;            << 
6798                                               << 
6799         clear_user_highpage(folio_page(folio, << 
6800         return 0;                             << 
6801 }                                             << 
6802                                                  4675 
6803 /**                                           !! 4676                 i++;
6804  * folio_zero_user - Zero a folio which will  !! 4677                 dst = mem_map_next(dst, dst_base, i);
6805  * @folio: The folio to zero.                 !! 4678                 src = mem_map_next(src, src_base, i);
6806  * @addr_hint: The address will be accessed o !! 4679         }
6807  */                                           << 
6808 void folio_zero_user(struct folio *folio, uns << 
6809 {                                             << 
6810         unsigned int nr_pages = folio_nr_page << 
6811                                               << 
6812         if (unlikely(nr_pages > MAX_ORDER_NR_ << 
6813                 clear_gigantic_page(folio, ad << 
6814         else                                  << 
6815                 process_huge_page(addr_hint,  << 
6816 }                                                4680 }
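A brief illustrative caller (hypothetical helper, assuming a freshly allocated, not yet mapped anonymous folio): passing the faulting address lets process_huge_page() clear that subpage last, so its cache lines are still hot when the fault handler returns to userspace.

static void my_prepare_anon_huge_folio(struct folio *folio,
                                       unsigned long fault_addr)
{
        /* Zero every subpage; the one containing fault_addr is cleared last. */
        folio_zero_user(folio, fault_addr);

        /* Typical follow-up before mapping a freshly zeroed anonymous folio. */
        __folio_mark_uptodate(folio);
}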
6817                                                  4681 
6818 static int copy_user_gigantic_page(struct fol !! 4682 void copy_user_huge_page(struct page *dst, struct page *src,
6819                                    unsigned l !! 4683                          unsigned long addr, struct vm_area_struct *vma,
6820                                    struct vm_ !! 4684                          unsigned int pages_per_huge_page)
6821                                    unsigned i << 
6822 {                                                4685 {
6823         int i;                                   4686         int i;
6824         struct page *dst_page;                << 
6825         struct page *src_page;                << 
6826                                                  4687 
6827         for (i = 0; i < nr_pages; i++) {      !! 4688         if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
6828                 dst_page = folio_page(dst, i) !! 4689                 copy_user_gigantic_page(dst, src, addr, vma,
6829                 src_page = folio_page(src, i) !! 4690                                         pages_per_huge_page);
                                                   >> 4691                 return;
                                                   >> 4692         }
6830                                                  4693 
                                                   >> 4694         might_sleep();
                                                   >> 4695         for (i = 0; i < pages_per_huge_page; i++) {
6831                 cond_resched();                  4696                 cond_resched();
6832                 if (copy_mc_user_highpage(dst !! 4697                 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
6833                                           add << 
6834                         return -EHWPOISON;    << 
6835         }                                        4698         }
6836         return 0;                             << 
6837 }                                             << 
6838                                               << 
6839 struct copy_subpage_arg {                     << 
6840         struct folio *dst;                    << 
6841         struct folio *src;                    << 
6842         struct vm_area_struct *vma;           << 
6843 };                                            << 
6844                                               << 
6845 static int copy_subpage(unsigned long addr, i << 
6846 {                                             << 
6847         struct copy_subpage_arg *copy_arg = a << 
6848         struct page *dst = folio_page(copy_ar << 
6849         struct page *src = folio_page(copy_ar << 
6850                                               << 
6851         if (copy_mc_user_highpage(dst, src, a << 
6852                 return -EHWPOISON;            << 
6853         return 0;                             << 
6854 }                                             << 
6855                                               << 
6856 int copy_user_large_folio(struct folio *dst,  << 
6857                           unsigned long addr_ << 
6858 {                                             << 
6859         unsigned int nr_pages = folio_nr_page << 
6860         struct copy_subpage_arg arg = {       << 
6861                 .dst = dst,                   << 
6862                 .src = src,                   << 
6863                 .vma = vma,                   << 
6864         };                                    << 
6865                                               << 
6866         if (unlikely(nr_pages > MAX_ORDER_NR_ << 
6867                 return copy_user_gigantic_pag << 
6868                                               << 
6869         return process_huge_page(addr_hint, n << 
6870 }                                                4699 }
6871                                                  4700 
6872 long copy_folio_from_user(struct folio *dst_f !! 4701 long copy_huge_page_from_user(struct page *dst_page,
6873                            const void __user  !! 4702                                 const void __user *usr_src,
6874                            bool allow_pagefau !! 4703                                 unsigned int pages_per_huge_page,
                                                   >> 4704                                 bool allow_pagefault)
6875 {                                                4705 {
6876         void *kaddr;                          !! 4706         void *src = (void *)usr_src;
                                                   >> 4707         void *page_kaddr;
6877         unsigned long i, rc = 0;                 4708         unsigned long i, rc = 0;
6878         unsigned int nr_pages = folio_nr_page !! 4709         unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
6879         unsigned long ret_val = nr_pages * PA !! 4710 
6880         struct page *subpage;                 !! 4711         for (i = 0; i < pages_per_huge_page; i++) {
6881                                               !! 4712                 if (allow_pagefault)
6882         for (i = 0; i < nr_pages; i++) {      !! 4713                         page_kaddr = kmap(dst_page + i);
6883                 subpage = folio_page(dst_foli !! 4714                 else
6884                 kaddr = kmap_local_page(subpa !! 4715                         page_kaddr = kmap_atomic(dst_page + i);
6885                 if (!allow_pagefault)         !! 4716                 rc = copy_from_user(page_kaddr,
6886                         pagefault_disable();  !! 4717                                 (const void __user *)(src + i * PAGE_SIZE),
6887                 rc = copy_from_user(kaddr, us !! 4718                                 PAGE_SIZE);
6888                 if (!allow_pagefault)         !! 4719                 if (allow_pagefault)
6889                         pagefault_enable();   !! 4720                         kunmap(dst_page + i);
6890                 kunmap_local(kaddr);          !! 4721                 else
                                                   >> 4722                         kunmap_atomic(page_kaddr);
6891                                                  4723 
6892                 ret_val -= (PAGE_SIZE - rc);     4724                 ret_val -= (PAGE_SIZE - rc);
6893                 if (rc)                          4725                 if (rc)
6894                         break;                   4726                         break;
6895                                                  4727 
6896                 flush_dcache_page(subpage);   << 
6897                                               << 
6898                 cond_resched();                  4728                 cond_resched();
6899         }                                        4729         }
6900         return ret_val;                          4730         return ret_val;
6901 }                                                4731 }
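An illustrative caller (my_fill_folio_from_user is invented for this sketch): as with copy_from_user(), the return value is the number of bytes left uncopied, so zero means the whole folio was filled from userspace.

static int my_fill_folio_from_user(struct folio *dst,
                                   const void __user *usr_src)
{
        /*
         * allow_pagefault == false runs the copy with page faults disabled;
         * a caller that can sleep and retry would pass true instead.
         */
        if (copy_folio_from_user(dst, usr_src, false))
                return -EFAULT;

        return 0;
}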
6902 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONF    4732 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
6903                                                  4733 
6904 #if defined(CONFIG_SPLIT_PTE_PTLOCKS) && ALLO !! 4734 #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
6905                                                  4735 
6906 static struct kmem_cache *page_ptl_cachep;       4736 static struct kmem_cache *page_ptl_cachep;
6907                                                  4737 
6908 void __init ptlock_cache_init(void)              4738 void __init ptlock_cache_init(void)
6909 {                                                4739 {
6910         page_ptl_cachep = kmem_cache_create("    4740         page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
6911                         SLAB_PANIC, NULL);       4741                         SLAB_PANIC, NULL);
6912 }                                                4742 }
6913                                                  4743 
6914 bool ptlock_alloc(struct ptdesc *ptdesc)      !! 4744 bool ptlock_alloc(struct page *page)
6915 {                                                4745 {
6916         spinlock_t *ptl;                         4746         spinlock_t *ptl;
6917                                                  4747 
6918         ptl = kmem_cache_alloc(page_ptl_cache    4748         ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
6919         if (!ptl)                                4749         if (!ptl)
6920                 return false;                    4750                 return false;
6921         ptdesc->ptl = ptl;                    !! 4751         page->ptl = ptl;
6922         return true;                             4752         return true;
6923 }                                                4753 }
6924                                                  4754 
6925 void ptlock_free(struct ptdesc *ptdesc)       !! 4755 void ptlock_free(struct page *page)
6926 {                                                4756 {
6927         kmem_cache_free(page_ptl_cachep, ptde !! 4757         kmem_cache_free(page_ptl_cachep, page->ptl);
6928 }                                                4758 }
6929 #endif                                           4759 #endif
6930                                               << 
6931 void vma_pgtable_walk_begin(struct vm_area_st << 
6932 {                                             << 
6933         if (is_vm_hugetlb_page(vma))          << 
6934                 hugetlb_vma_lock_read(vma);   << 
6935 }                                             << 
6936                                               << 
6937 void vma_pgtable_walk_end(struct vm_area_stru << 
6938 {                                             << 
6939         if (is_vm_hugetlb_page(vma))          << 
6940                 hugetlb_vma_unlock_read(vma); << 
6941 }                                             << 
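A minimal sketch of the intended bracketing (my_walk_vma_ptes is a hypothetical walker): for hugetlb vmas the helpers above take and drop the hugetlb VMA lock for read, and they are no-ops for every other vma type.

static void my_walk_vma_ptes(struct vm_area_struct *vma)
{
        vma_pgtable_walk_begin(vma);
        /* ... walk vma's page tables here under the appropriate protection ... */
        vma_pgtable_walk_end(vma);
}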
6942                                                  4760 
