
TOMOYO Linux Cross Reference
Linux/mm/rmap.c


Diff markup

Differences between /mm/rmap.c (Version linux-6.12-rc7) and /mm/rmap.c (Version linux-6.2.16)


  1 /*                                                  1 /*
  2  * mm/rmap.c - physical to virtual reverse map      2  * mm/rmap.c - physical to virtual reverse mappings
  3  *                                                  3  *
  4  * Copyright 2001, Rik van Riel <riel@conectiv      4  * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
  5  * Released under the General Public License (      5  * Released under the General Public License (GPL).
  6  *                                                  6  *
  7  * Simple, low overhead reverse mapping scheme      7  * Simple, low overhead reverse mapping scheme.
  8  * Please try to keep this thing as modular as      8  * Please try to keep this thing as modular as possible.
  9  *                                                  9  *
 10  * Provides methods for unmapping each kind of     10  * Provides methods for unmapping each kind of mapped page:
 11  * the anon methods track anonymous pages, and     11  * the anon methods track anonymous pages, and
 12  * the file methods track pages belonging to a     12  * the file methods track pages belonging to an inode.
 13  *                                                 13  *
 14  * Original design by Rik van Riel <riel@conec     14  * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 15  * File methods by Dave McCracken <dmccr@us.ib     15  * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 16  * Anonymous methods by Andrea Arcangeli <andr     16  * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 17  * Contributions by Hugh Dickins 2003, 2004        17  * Contributions by Hugh Dickins 2003, 2004
 18  */                                                18  */
 19                                                    19 
 20 /*                                                 20 /*
 21  * Lock ordering in mm:                            21  * Lock ordering in mm:
 22  *                                                 22  *
 23  * inode->i_rwsem       (while writing or trun     23  * inode->i_rwsem       (while writing or truncating, not reading or faulting)
 24  *   mm->mmap_lock                                 24  *   mm->mmap_lock
 25  *     mapping->invalidate_lock (in filemap_fa     25  *     mapping->invalidate_lock (in filemap_fault)
 26  *       folio_lock                            !!  26  *       page->flags PG_locked (lock_page)
 27  *         hugetlbfs_i_mmap_rwsem_key (in huge     27  *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
 28  *           vma_start_write                   !!  28  *           mapping->i_mmap_rwsem
 29  *             mapping->i_mmap_rwsem           !!  29  *             anon_vma->rwsem
 30  *               anon_vma->rwsem               !!  30  *               mm->page_table_lock or pte_lock
 31  *                 mm->page_table_lock or pte_ !!  31  *                 swap_lock (in swap_duplicate, swap_info_get)
 32  *                   swap_lock (in swap_duplic !!  32  *                   mmlist_lock (in mmput, drain_mmlist and others)
 33  *                     mmlist_lock (in mmput,  !!  33  *                   mapping->private_lock (in block_dirty_folio)
 34  *                     mapping->private_lock ( !!  34  *                     folio_lock_memcg move_lock (in block_dirty_folio)
 35  *                       folio_lock_memcg move !!  35  *                       i_pages lock (widely used)
 36  *                         i_pages lock (widel !!  36  *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
 37  *                           lruvec->lru_lock  !!  37  *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 38  *                     inode->i_lock (in set_p !!  38  *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 39  *                     bdi.wb->list_lock (in s !!  39  *                     sb_lock (within inode_lock in fs/fs-writeback.c)
 40  *                       sb_lock (within inode !!  40  *                     i_pages lock (widely used, in set_page_dirty,
 41  *                       i_pages lock (widely  !!  41  *                               in arch-dependent flush_dcache_mmap_lock,
 42  *                                 in arch-dep !!  42  *                               within bdi.wb->list_lock in __sync_single_inode)
 43  *                                 within bdi. << 
 44  *                                                 43  *
 45  * anon_vma->rwsem,mapping->i_mmap_rwsem   (me     44  * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
 46  *   ->tasklist_lock                               45  *   ->tasklist_lock
 47  *     pte map lock                                46  *     pte map lock
 48  *                                                 47  *
 49  * hugetlbfs PageHuge() take locks in this ord     48  * hugetlbfs PageHuge() take locks in this order:
 50  *   hugetlb_fault_mutex (hugetlbfs specific p     49  *   hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 51  *     vma_lock (hugetlb specific lock for pmd     50  *     vma_lock (hugetlb specific lock for pmd_sharing)
 52  *       mapping->i_mmap_rwsem (also used for      51  *       mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
 53  *         folio_lock                          !!  52  *         page->flags PG_locked (lock_page)
 54  */                                                53  */
 55                                                    54 
 56 #include <linux/mm.h>                              55 #include <linux/mm.h>
 57 #include <linux/sched/mm.h>                        56 #include <linux/sched/mm.h>
 58 #include <linux/sched/task.h>                      57 #include <linux/sched/task.h>
 59 #include <linux/pagemap.h>                         58 #include <linux/pagemap.h>
 60 #include <linux/swap.h>                            59 #include <linux/swap.h>
 61 #include <linux/swapops.h>                         60 #include <linux/swapops.h>
 62 #include <linux/slab.h>                            61 #include <linux/slab.h>
 63 #include <linux/init.h>                            62 #include <linux/init.h>
 64 #include <linux/ksm.h>                             63 #include <linux/ksm.h>
 65 #include <linux/rmap.h>                            64 #include <linux/rmap.h>
 66 #include <linux/rcupdate.h>                        65 #include <linux/rcupdate.h>
 67 #include <linux/export.h>                          66 #include <linux/export.h>
 68 #include <linux/memcontrol.h>                      67 #include <linux/memcontrol.h>
 69 #include <linux/mmu_notifier.h>                    68 #include <linux/mmu_notifier.h>
 70 #include <linux/migrate.h>                         69 #include <linux/migrate.h>
 71 #include <linux/hugetlb.h>                         70 #include <linux/hugetlb.h>
 72 #include <linux/huge_mm.h>                         71 #include <linux/huge_mm.h>
 73 #include <linux/backing-dev.h>                     72 #include <linux/backing-dev.h>
 74 #include <linux/page_idle.h>                       73 #include <linux/page_idle.h>
 75 #include <linux/memremap.h>                        74 #include <linux/memremap.h>
 76 #include <linux/userfaultfd_k.h>                   75 #include <linux/userfaultfd_k.h>
 77 #include <linux/mm_inline.h>                       76 #include <linux/mm_inline.h>
 78 #include <linux/oom.h>                         << 
 79                                                    77 
 80 #include <asm/tlbflush.h>                          78 #include <asm/tlbflush.h>
 81                                                    79 
 82 #define CREATE_TRACE_POINTS                        80 #define CREATE_TRACE_POINTS
 83 #include <trace/events/tlb.h>                      81 #include <trace/events/tlb.h>
 84 #include <trace/events/migrate.h>                  82 #include <trace/events/migrate.h>
 85                                                    83 
 86 #include "internal.h"                              84 #include "internal.h"
 87                                                    85 
 88 static struct kmem_cache *anon_vma_cachep;         86 static struct kmem_cache *anon_vma_cachep;
 89 static struct kmem_cache *anon_vma_chain_cache     87 static struct kmem_cache *anon_vma_chain_cachep;
 90                                                    88 
 91 static inline struct anon_vma *anon_vma_alloc(     89 static inline struct anon_vma *anon_vma_alloc(void)
 92 {                                                  90 {
 93         struct anon_vma *anon_vma;                 91         struct anon_vma *anon_vma;
 94                                                    92 
 95         anon_vma = kmem_cache_alloc(anon_vma_c     93         anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
 96         if (anon_vma) {                            94         if (anon_vma) {
 97                 atomic_set(&anon_vma->refcount     95                 atomic_set(&anon_vma->refcount, 1);
 98                 anon_vma->num_children = 0;        96                 anon_vma->num_children = 0;
 99                 anon_vma->num_active_vmas = 0;     97                 anon_vma->num_active_vmas = 0;
100                 anon_vma->parent = anon_vma;       98                 anon_vma->parent = anon_vma;
101                 /*                                 99                 /*
102                  * Initialise the anon_vma roo    100                  * Initialise the anon_vma root to point to itself. If called
103                  * from fork, the root will be    101                  * from fork, the root will be reset to the parents anon_vma.
104                  */                               102                  */
105                 anon_vma->root = anon_vma;        103                 anon_vma->root = anon_vma;
106         }                                         104         }
107                                                   105 
108         return anon_vma;                          106         return anon_vma;
109 }                                                 107 }
110                                                   108 
111 static inline void anon_vma_free(struct anon_v    109 static inline void anon_vma_free(struct anon_vma *anon_vma)
112 {                                                 110 {
113         VM_BUG_ON(atomic_read(&anon_vma->refco    111         VM_BUG_ON(atomic_read(&anon_vma->refcount));
114                                                   112 
115         /*                                        113         /*
116          * Synchronize against folio_lock_anon    114          * Synchronize against folio_lock_anon_vma_read() such that
117          * we can safely hold the lock without    115          * we can safely hold the lock without the anon_vma getting
118          * freed.                                 116          * freed.
119          *                                        117          *
120          * Relies on the full mb implied by th    118          * Relies on the full mb implied by the atomic_dec_and_test() from
121          * put_anon_vma() against the acquire     119          * put_anon_vma() against the acquire barrier implied by
122          * down_read_trylock() from folio_lock    120          * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
123          *                                        121          *
124          * folio_lock_anon_vma_read()   VS        122          * folio_lock_anon_vma_read()   VS      put_anon_vma()
125          *   down_read_trylock()                  123          *   down_read_trylock()                  atomic_dec_and_test()
126          *   LOCK                                 124          *   LOCK                                 MB
127          *   atomic_read()                        125          *   atomic_read()                        rwsem_is_locked()
128          *                                        126          *
129          * LOCK should suffice since the actua    127          * LOCK should suffice since the actual taking of the lock must
130          * happen _before_ what follows.          128          * happen _before_ what follows.
131          */                                       129          */
132         might_sleep();                            130         might_sleep();
133         if (rwsem_is_locked(&anon_vma->root->r    131         if (rwsem_is_locked(&anon_vma->root->rwsem)) {
134                 anon_vma_lock_write(anon_vma);    132                 anon_vma_lock_write(anon_vma);
135                 anon_vma_unlock_write(anon_vma    133                 anon_vma_unlock_write(anon_vma);
136         }                                         134         }
137                                                   135 
138         kmem_cache_free(anon_vma_cachep, anon_    136         kmem_cache_free(anon_vma_cachep, anon_vma);
139 }                                                 137 }
140                                                   138 
141 static inline struct anon_vma_chain *anon_vma_    139 static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
142 {                                                 140 {
143         return kmem_cache_alloc(anon_vma_chain    141         return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
144 }                                                 142 }
145                                                   143 
146 static void anon_vma_chain_free(struct anon_vm    144 static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
147 {                                                 145 {
148         kmem_cache_free(anon_vma_chain_cachep,    146         kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
149 }                                                 147 }
150                                                   148 
151 static void anon_vma_chain_link(struct vm_area    149 static void anon_vma_chain_link(struct vm_area_struct *vma,
152                                 struct anon_vm    150                                 struct anon_vma_chain *avc,
153                                 struct anon_vm    151                                 struct anon_vma *anon_vma)
154 {                                                 152 {
155         avc->vma = vma;                           153         avc->vma = vma;
156         avc->anon_vma = anon_vma;                 154         avc->anon_vma = anon_vma;
157         list_add(&avc->same_vma, &vma->anon_vm    155         list_add(&avc->same_vma, &vma->anon_vma_chain);
158         anon_vma_interval_tree_insert(avc, &an    156         anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
159 }                                                 157 }
160                                                   158 
161 /**                                               159 /**
162  * __anon_vma_prepare - attach an anon_vma to     160  * __anon_vma_prepare - attach an anon_vma to a memory region
163  * @vma: the memory region in question            161  * @vma: the memory region in question
164  *                                                162  *
165  * This makes sure the memory mapping describe    163  * This makes sure the memory mapping described by 'vma' has
166  * an 'anon_vma' attached to it, so that we ca    164  * an 'anon_vma' attached to it, so that we can associate the
167  * anonymous pages mapped into it with that an    165  * anonymous pages mapped into it with that anon_vma.
168  *                                                166  *
169  * The common case will be that we already hav    167  * The common case will be that we already have one, which
170  * is handled inline by anon_vma_prepare(). Bu    168  * is handled inline by anon_vma_prepare(). But if
171  * not we either need to find an adjacent mapp    169  * not we either need to find an adjacent mapping that we
172  * can re-use the anon_vma from (very common w    170  * can re-use the anon_vma from (very common when the only
173  * reason for splitting a vma has been mprotec    171  * reason for splitting a vma has been mprotect()), or we
174  * allocate a new one.                            172  * allocate a new one.
175  *                                                173  *
176  * Anon-vma allocations are very subtle, becau    174  * Anon-vma allocations are very subtle, because we may have
177  * optimistically looked up an anon_vma in fol    175  * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
178  * and that may actually touch the rwsem even     176  * and that may actually touch the rwsem even in the newly
179  * allocated vma (it depends on RCU to make su    177  * allocated vma (it depends on RCU to make sure that the
180  * anon_vma isn't actually destroyed).            178  * anon_vma isn't actually destroyed).
181  *                                                179  *
182  * As a result, we need to do proper anon_vma     180  * As a result, we need to do proper anon_vma locking even
183  * for the new allocation. At the same time, w    181  * for the new allocation. At the same time, we do not want
184  * to do any locking for the common case of al    182  * to do any locking for the common case of already having
185  * an anon_vma.                                   183  * an anon_vma.
                                                   >> 184  *
                                                   >> 185  * This must be called with the mmap_lock held for reading.
186  */                                               186  */
187 int __anon_vma_prepare(struct vm_area_struct *    187 int __anon_vma_prepare(struct vm_area_struct *vma)
188 {                                                 188 {
189         struct mm_struct *mm = vma->vm_mm;        189         struct mm_struct *mm = vma->vm_mm;
190         struct anon_vma *anon_vma, *allocated;    190         struct anon_vma *anon_vma, *allocated;
191         struct anon_vma_chain *avc;               191         struct anon_vma_chain *avc;
192                                                   192 
193         mmap_assert_locked(mm);                << 
194         might_sleep();                            193         might_sleep();
195                                                   194 
196         avc = anon_vma_chain_alloc(GFP_KERNEL)    195         avc = anon_vma_chain_alloc(GFP_KERNEL);
197         if (!avc)                                 196         if (!avc)
198                 goto out_enomem;                  197                 goto out_enomem;
199                                                   198 
200         anon_vma = find_mergeable_anon_vma(vma    199         anon_vma = find_mergeable_anon_vma(vma);
201         allocated = NULL;                         200         allocated = NULL;
202         if (!anon_vma) {                          201         if (!anon_vma) {
203                 anon_vma = anon_vma_alloc();      202                 anon_vma = anon_vma_alloc();
204                 if (unlikely(!anon_vma))          203                 if (unlikely(!anon_vma))
205                         goto out_enomem_free_a    204                         goto out_enomem_free_avc;
206                 anon_vma->num_children++; /* s    205                 anon_vma->num_children++; /* self-parent link for new root */
207                 allocated = anon_vma;             206                 allocated = anon_vma;
208         }                                         207         }
209                                                   208 
210         anon_vma_lock_write(anon_vma);            209         anon_vma_lock_write(anon_vma);
211         /* page_table_lock to protect against     210         /* page_table_lock to protect against threads */
212         spin_lock(&mm->page_table_lock);          211         spin_lock(&mm->page_table_lock);
213         if (likely(!vma->anon_vma)) {             212         if (likely(!vma->anon_vma)) {
214                 vma->anon_vma = anon_vma;         213                 vma->anon_vma = anon_vma;
215                 anon_vma_chain_link(vma, avc,     214                 anon_vma_chain_link(vma, avc, anon_vma);
216                 anon_vma->num_active_vmas++;      215                 anon_vma->num_active_vmas++;
217                 allocated = NULL;                 216                 allocated = NULL;
218                 avc = NULL;                       217                 avc = NULL;
219         }                                         218         }
220         spin_unlock(&mm->page_table_lock);        219         spin_unlock(&mm->page_table_lock);
221         anon_vma_unlock_write(anon_vma);          220         anon_vma_unlock_write(anon_vma);
222                                                   221 
223         if (unlikely(allocated))                  222         if (unlikely(allocated))
224                 put_anon_vma(allocated);          223                 put_anon_vma(allocated);
225         if (unlikely(avc))                        224         if (unlikely(avc))
226                 anon_vma_chain_free(avc);         225                 anon_vma_chain_free(avc);
227                                                   226 
228         return 0;                                 227         return 0;
229                                                   228 
230  out_enomem_free_avc:                             229  out_enomem_free_avc:
231         anon_vma_chain_free(avc);                 230         anon_vma_chain_free(avc);
232  out_enomem:                                      231  out_enomem:
233         return -ENOMEM;                           232         return -ENOMEM;
234 }                                                 233 }
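
The kernel-doc above notes that the common case, a VMA that already has an anon_vma, is handled inline by anon_vma_prepare(). For reference, that wrapper lives in include/linux/rmap.h rather than in this file; a minimal sketch of it (paraphrased, not part of the diff columns above) looks like:

	static inline int anon_vma_prepare(struct vm_area_struct *vma)
	{
		/* Fast path: the VMA already has an anon_vma attached. */
		if (likely(vma->anon_vma))
			return 0;

		/* Slow path: allocate or reuse one under the proper locks. */
		return __anon_vma_prepare(vma);
	}

Fault handlers call anon_vma_prepare() before installing a new anonymous page, with the mmap_lock held, so the rmap code always finds an anon_vma to attach the page to.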
235                                                   234 
236 /*                                                235 /*
237  * This is a useful helper function for lockin    236  * This is a useful helper function for locking the anon_vma root as
238  * we traverse the vma->anon_vma_chain, loopin    237  * we traverse the vma->anon_vma_chain, looping over anon_vma's that
239  * have the same vma.                             238  * have the same vma.
240  *                                                239  *
241  * Such anon_vma's should have the same root,     240  * Such anon_vma's should have the same root, so you'd expect to see
242  * just a single mutex_lock for the whole trav    241  * just a single mutex_lock for the whole traversal.
243  */                                               242  */
244 static inline struct anon_vma *lock_anon_vma_r    243 static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
245 {                                                 244 {
246         struct anon_vma *new_root = anon_vma->    245         struct anon_vma *new_root = anon_vma->root;
247         if (new_root != root) {                   246         if (new_root != root) {
248                 if (WARN_ON_ONCE(root))           247                 if (WARN_ON_ONCE(root))
249                         up_write(&root->rwsem)    248                         up_write(&root->rwsem);
250                 root = new_root;                  249                 root = new_root;
251                 down_write(&root->rwsem);         250                 down_write(&root->rwsem);
252         }                                         251         }
253         return root;                              252         return root;
254 }                                                 253 }
255                                                   254 
256 static inline void unlock_anon_vma_root(struct    255 static inline void unlock_anon_vma_root(struct anon_vma *root)
257 {                                                 256 {
258         if (root)                                 257         if (root)
259                 up_write(&root->rwsem);           258                 up_write(&root->rwsem);
260 }                                                 259 }
261                                                   260 
262 /*                                                261 /*
263  * Attach the anon_vmas from src to dst.          262  * Attach the anon_vmas from src to dst.
264  * Returns 0 on success, -ENOMEM on failure.      263  * Returns 0 on success, -ENOMEM on failure.
265  *                                                264  *
266  * anon_vma_clone() is called by vma_expand(), !! 265  * anon_vma_clone() is called by __vma_adjust(), __split_vma(), copy_vma() and
267  * copy_vma() and anon_vma_fork(). The first f !! 266  * anon_vma_fork(). The first three want an exact copy of src, while the last
268  * while the last one, anon_vma_fork(), may tr !! 267  * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
269  * prevent endless growth of anon_vma. Since d !! 268  * endless growth of anon_vma. Since dst->anon_vma is set to NULL before call,
270  * call, we can identify this case by checking !! 269  * we can identify this case by checking (!dst->anon_vma && src->anon_vma).
271  * src->anon_vma).                             << 
272  *                                                270  *
273  * If (!dst->anon_vma && src->anon_vma) is tru    271  * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
274  * and reuse existing anon_vma which has no vm    272  * and reuse existing anon_vma which has no vmas and only one child anon_vma.
275  * This prevents degradation of anon_vma hiera    273  * This prevents degradation of anon_vma hierarchy to endless linear chain in
276  * case of constantly forking task. On the oth    274  * case of constantly forking task. On the other hand, an anon_vma with more
277  * than one child isn't reused even if there w    275  * than one child isn't reused even if there was no alive vma, thus rmap
278  * walker has a good chance of avoiding scanni    276  * walker has a good chance of avoiding scanning the whole hierarchy when it
279  * searches where page is mapped.                 277  * searches where page is mapped.
280  */                                               278  */
281 int anon_vma_clone(struct vm_area_struct *dst,    279 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
282 {                                                 280 {
283         struct anon_vma_chain *avc, *pavc;        281         struct anon_vma_chain *avc, *pavc;
284         struct anon_vma *root = NULL;             282         struct anon_vma *root = NULL;
285                                                   283 
286         list_for_each_entry_reverse(pavc, &src    284         list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
287                 struct anon_vma *anon_vma;        285                 struct anon_vma *anon_vma;
288                                                   286 
289                 avc = anon_vma_chain_alloc(GFP    287                 avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
290                 if (unlikely(!avc)) {             288                 if (unlikely(!avc)) {
291                         unlock_anon_vma_root(r    289                         unlock_anon_vma_root(root);
292                         root = NULL;              290                         root = NULL;
293                         avc = anon_vma_chain_a    291                         avc = anon_vma_chain_alloc(GFP_KERNEL);
294                         if (!avc)                 292                         if (!avc)
295                                 goto enomem_fa    293                                 goto enomem_failure;
296                 }                                 294                 }
297                 anon_vma = pavc->anon_vma;        295                 anon_vma = pavc->anon_vma;
298                 root = lock_anon_vma_root(root    296                 root = lock_anon_vma_root(root, anon_vma);
299                 anon_vma_chain_link(dst, avc,     297                 anon_vma_chain_link(dst, avc, anon_vma);
300                                                   298 
301                 /*                                299                 /*
302                  * Reuse existing anon_vma if     300                  * Reuse existing anon_vma if it has no vma and only one
303                  * anon_vma child.                301                  * anon_vma child.
304                  *                                302                  *
305                  * Root anon_vma is never reus    303                  * Root anon_vma is never reused:
306                  * it has self-parent referenc    304                  * it has self-parent reference and at least one child.
307                  */                               305                  */
308                 if (!dst->anon_vma && src->ano    306                 if (!dst->anon_vma && src->anon_vma &&
309                     anon_vma->num_children < 2    307                     anon_vma->num_children < 2 &&
310                     anon_vma->num_active_vmas     308                     anon_vma->num_active_vmas == 0)
311                         dst->anon_vma = anon_v    309                         dst->anon_vma = anon_vma;
312         }                                         310         }
313         if (dst->anon_vma)                        311         if (dst->anon_vma)
314                 dst->anon_vma->num_active_vmas    312                 dst->anon_vma->num_active_vmas++;
315         unlock_anon_vma_root(root);               313         unlock_anon_vma_root(root);
316         return 0;                                 314         return 0;
317                                                   315 
318  enomem_failure:                                  316  enomem_failure:
319         /*                                        317         /*
320          * dst->anon_vma is dropped here other    318          * dst->anon_vma is dropped here otherwise its num_active_vmas can
321          * be incorrectly decremented in unlin    319          * be incorrectly decremented in unlink_anon_vmas().
322          * We can safely do this because calle    320          * We can safely do this because callers of anon_vma_clone() don't care
323          * about dst->anon_vma if anon_vma_clo    321          * about dst->anon_vma if anon_vma_clone() failed.
324          */                                       322          */
325         dst->anon_vma = NULL;                     323         dst->anon_vma = NULL;
326         unlink_anon_vmas(dst);                    324         unlink_anon_vmas(dst);
327         return -ENOMEM;                           325         return -ENOMEM;
328 }                                                 326 }
329                                                   327 
330 /*                                                328 /*
331  * Attach vma to its own anon_vma, as well as     329  * Attach vma to its own anon_vma, as well as to the anon_vmas that
332  * the corresponding VMA in the parent process    330  * the corresponding VMA in the parent process is attached to.
333  * Returns 0 on success, non-zero on failure.     331  * Returns 0 on success, non-zero on failure.
334  */                                               332  */
335 int anon_vma_fork(struct vm_area_struct *vma,     333 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
336 {                                                 334 {
337         struct anon_vma_chain *avc;               335         struct anon_vma_chain *avc;
338         struct anon_vma *anon_vma;                336         struct anon_vma *anon_vma;
339         int error;                                337         int error;
340                                                   338 
341         /* Don't bother if the parent process     339         /* Don't bother if the parent process has no anon_vma here. */
342         if (!pvma->anon_vma)                      340         if (!pvma->anon_vma)
343                 return 0;                         341                 return 0;
344                                                   342 
345         /* Drop inherited anon_vma, we'll reus    343         /* Drop inherited anon_vma, we'll reuse existing or allocate new. */
346         vma->anon_vma = NULL;                     344         vma->anon_vma = NULL;
347                                                   345 
348         /*                                        346         /*
349          * First, attach the new VMA to the pa    347          * First, attach the new VMA to the parent VMA's anon_vmas,
350          * so rmap can find non-COWed pages in    348          * so rmap can find non-COWed pages in child processes.
351          */                                       349          */
352         error = anon_vma_clone(vma, pvma);        350         error = anon_vma_clone(vma, pvma);
353         if (error)                                351         if (error)
354                 return error;                     352                 return error;
355                                                   353 
356         /* An existing anon_vma has been reuse    354         /* An existing anon_vma has been reused, all done then. */
357         if (vma->anon_vma)                        355         if (vma->anon_vma)
358                 return 0;                         356                 return 0;
359                                                   357 
360         /* Then add our own anon_vma. */          358         /* Then add our own anon_vma. */
361         anon_vma = anon_vma_alloc();              359         anon_vma = anon_vma_alloc();
362         if (!anon_vma)                            360         if (!anon_vma)
363                 goto out_error;                   361                 goto out_error;
364         anon_vma->num_active_vmas++;              362         anon_vma->num_active_vmas++;
365         avc = anon_vma_chain_alloc(GFP_KERNEL)    363         avc = anon_vma_chain_alloc(GFP_KERNEL);
366         if (!avc)                                 364         if (!avc)
367                 goto out_error_free_anon_vma;     365                 goto out_error_free_anon_vma;
368                                                   366 
369         /*                                        367         /*
370          * The root anon_vma's rwsem is the lo    368          * The root anon_vma's rwsem is the lock actually used when we
371          * lock any of the anon_vmas in this a    369          * lock any of the anon_vmas in this anon_vma tree.
372          */                                       370          */
373         anon_vma->root = pvma->anon_vma->root;    371         anon_vma->root = pvma->anon_vma->root;
374         anon_vma->parent = pvma->anon_vma;        372         anon_vma->parent = pvma->anon_vma;
375         /*                                        373         /*
376          * With refcounts, an anon_vma can sta    374          * With refcounts, an anon_vma can stay around longer than the
377          * process it belongs to. The root ano    375          * process it belongs to. The root anon_vma needs to be pinned until
378          * this anon_vma is freed, because the    376          * this anon_vma is freed, because the lock lives in the root.
379          */                                       377          */
380         get_anon_vma(anon_vma->root);             378         get_anon_vma(anon_vma->root);
381         /* Mark this anon_vma as the one where    379         /* Mark this anon_vma as the one where our new (COWed) pages go. */
382         vma->anon_vma = anon_vma;                 380         vma->anon_vma = anon_vma;
383         anon_vma_lock_write(anon_vma);            381         anon_vma_lock_write(anon_vma);
384         anon_vma_chain_link(vma, avc, anon_vma    382         anon_vma_chain_link(vma, avc, anon_vma);
385         anon_vma->parent->num_children++;         383         anon_vma->parent->num_children++;
386         anon_vma_unlock_write(anon_vma);          384         anon_vma_unlock_write(anon_vma);
387                                                   385 
388         return 0;                                 386         return 0;
389                                                   387 
390  out_error_free_anon_vma:                         388  out_error_free_anon_vma:
391         put_anon_vma(anon_vma);                   389         put_anon_vma(anon_vma);
392  out_error:                                       390  out_error:
393         unlink_anon_vmas(vma);                    391         unlink_anon_vmas(vma);
394         return -ENOMEM;                           392         return -ENOMEM;
395 }                                                 393 }
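
For context, anon_vma_fork() is driven from the fork path: dup_mmap() calls it once per VMA copied into the child. A rough, paraphrased sketch of that call site (the surrounding code and the error label are illustrative, loosely following kernel/fork.c):

	/* 'tmp' is the child's copy of the parent VMA 'mpnt'. */
	if (anon_vma_fork(tmp, mpnt))
		goto fail_nomem_anon_vma_fork;	/* out of memory: abort the fork */

On failure, anon_vma_fork() has already cleaned up its own chains via unlink_anon_vmas(), so the caller only has to unwind its own state.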
396                                                   394 
397 void unlink_anon_vmas(struct vm_area_struct *v    395 void unlink_anon_vmas(struct vm_area_struct *vma)
398 {                                                 396 {
399         struct anon_vma_chain *avc, *next;        397         struct anon_vma_chain *avc, *next;
400         struct anon_vma *root = NULL;             398         struct anon_vma *root = NULL;
401                                                   399 
402         /*                                        400         /*
403          * Unlink each anon_vma chained to the    401          * Unlink each anon_vma chained to the VMA.  This list is ordered
404          * from newest to oldest, ensuring the    402          * from newest to oldest, ensuring the root anon_vma gets freed last.
405          */                                       403          */
406         list_for_each_entry_safe(avc, next, &v    404         list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
407                 struct anon_vma *anon_vma = av    405                 struct anon_vma *anon_vma = avc->anon_vma;
408                                                   406 
409                 root = lock_anon_vma_root(root    407                 root = lock_anon_vma_root(root, anon_vma);
410                 anon_vma_interval_tree_remove(    408                 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
411                                                   409 
412                 /*                                410                 /*
413                  * Leave empty anon_vmas on th    411                  * Leave empty anon_vmas on the list - we'll need
414                  * to free them outside the lo    412                  * to free them outside the lock.
415                  */                               413                  */
416                 if (RB_EMPTY_ROOT(&anon_vma->r    414                 if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
417                         anon_vma->parent->num_    415                         anon_vma->parent->num_children--;
418                         continue;                 416                         continue;
419                 }                                 417                 }
420                                                   418 
421                 list_del(&avc->same_vma);         419                 list_del(&avc->same_vma);
422                 anon_vma_chain_free(avc);         420                 anon_vma_chain_free(avc);
423         }                                         421         }
424         if (vma->anon_vma) {                      422         if (vma->anon_vma) {
425                 vma->anon_vma->num_active_vmas    423                 vma->anon_vma->num_active_vmas--;
426                                                   424 
427                 /*                                425                 /*
428                  * vma would still be needed a    426                  * vma would still be needed after unlink, and anon_vma will be prepared
429                  * when handle fault.             427                  * when handle fault.
430                  */                               428                  */
431                 vma->anon_vma = NULL;             429                 vma->anon_vma = NULL;
432         }                                         430         }
433         unlock_anon_vma_root(root);               431         unlock_anon_vma_root(root);
434                                                   432 
435         /*                                        433         /*
436          * Iterate the list once more, it now     434          * Iterate the list once more, it now only contains empty and unlinked
437          * anon_vmas, destroy them. Could not     435          * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
438          * needing to write-acquire the anon_v    436          * needing to write-acquire the anon_vma->root->rwsem.
439          */                                       437          */
440         list_for_each_entry_safe(avc, next, &v    438         list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
441                 struct anon_vma *anon_vma = av    439                 struct anon_vma *anon_vma = avc->anon_vma;
442                                                   440 
443                 VM_WARN_ON(anon_vma->num_child    441                 VM_WARN_ON(anon_vma->num_children);
444                 VM_WARN_ON(anon_vma->num_activ    442                 VM_WARN_ON(anon_vma->num_active_vmas);
445                 put_anon_vma(anon_vma);           443                 put_anon_vma(anon_vma);
446                                                   444 
447                 list_del(&avc->same_vma);         445                 list_del(&avc->same_vma);
448                 anon_vma_chain_free(avc);         446                 anon_vma_chain_free(avc);
449         }                                         447         }
450 }                                                 448 }
451                                                   449 
452 static void anon_vma_ctor(void *data)             450 static void anon_vma_ctor(void *data)
453 {                                                 451 {
454         struct anon_vma *anon_vma = data;         452         struct anon_vma *anon_vma = data;
455                                                   453 
456         init_rwsem(&anon_vma->rwsem);             454         init_rwsem(&anon_vma->rwsem);
457         atomic_set(&anon_vma->refcount, 0);       455         atomic_set(&anon_vma->refcount, 0);
458         anon_vma->rb_root = RB_ROOT_CACHED;       456         anon_vma->rb_root = RB_ROOT_CACHED;
459 }                                                 457 }
460                                                   458 
461 void __init anon_vma_init(void)                   459 void __init anon_vma_init(void)
462 {                                                 460 {
463         anon_vma_cachep = kmem_cache_create("a    461         anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
464                         0, SLAB_TYPESAFE_BY_RC    462                         0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
465                         anon_vma_ctor);           463                         anon_vma_ctor);
466         anon_vma_chain_cachep = KMEM_CACHE(ano    464         anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
467                         SLAB_PANIC|SLAB_ACCOUN    465                         SLAB_PANIC|SLAB_ACCOUNT);
468 }                                                 466 }
469                                                   467 
470 /*                                                468 /*
471  * Getting a lock on a stable anon_vma from a     469  * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
472  *                                                470  *
473  * Since there is no serialization what so eve !! 471  * Since there is no serialization what so ever against page_remove_rmap()
474  * the best this function can do is return a r    472  * the best this function can do is return a refcount increased anon_vma
475  * that might have been relevant to this page.    473  * that might have been relevant to this page.
476  *                                                474  *
477  * The page might have been remapped to a diff    475  * The page might have been remapped to a different anon_vma or the anon_vma
478  * returned may already be freed (and even reu    476  * returned may already be freed (and even reused).
479  *                                                477  *
480  * In case it was remapped to a different anon    478  * In case it was remapped to a different anon_vma, the new anon_vma will be a
481  * child of the old anon_vma, and the anon_vma    479  * child of the old anon_vma, and the anon_vma lifetime rules will therefore
482  * ensure that any anon_vma obtained from the     480  * ensure that any anon_vma obtained from the page will still be valid for as
483  * long as we observe page_mapped() [ hence al    481  * long as we observe page_mapped() [ hence all those page_mapped() tests ].
484  *                                                482  *
485  * All users of this function must be very car    483  * All users of this function must be very careful when walking the anon_vma
486  * chain and verify that the page in question     484  * chain and verify that the page in question is indeed mapped in it
487  * [ something equivalent to page_mapped_in_vm    485  * [ something equivalent to page_mapped_in_vma() ].
488  *                                                486  *
489  * Since anon_vma's slab is SLAB_TYPESAFE_BY_R    487  * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
490  * folio_remove_rmap_*() that the anon_vma poi !! 488  * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
491  * if there is a mapcount, we can dereference     489  * if there is a mapcount, we can dereference the anon_vma after observing
492  * those.                                         490  * those.
493  *                                             << 
 494  * NOTE: the caller should normally hold folio lock when calling this.  If      <<
 495  * not, the caller needs to double check the anon_vma didn't change after       <<
 496  * taking the anon_vma lock for either read or write (UFFDIO_MOVE can modify it <<
 497  * concurrently without folio lock protection). See folio_lock_anon_vma_read()  <<
 498  * which has already covered that, and comments above folio_lock_anon_vma_read(). <<
499  */                                               491  */
500 struct anon_vma *folio_get_anon_vma(struct fol    492 struct anon_vma *folio_get_anon_vma(struct folio *folio)
501 {                                                 493 {
502         struct anon_vma *anon_vma = NULL;         494         struct anon_vma *anon_vma = NULL;
503         unsigned long anon_mapping;               495         unsigned long anon_mapping;
504                                                   496 
505         rcu_read_lock();                          497         rcu_read_lock();
506         anon_mapping = (unsigned long)READ_ONC    498         anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
507         if ((anon_mapping & PAGE_MAPPING_FLAGS    499         if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
508                 goto out;                         500                 goto out;
509         if (!folio_mapped(folio))                 501         if (!folio_mapped(folio))
510                 goto out;                         502                 goto out;
511                                                   503 
512         anon_vma = (struct anon_vma *) (anon_m    504         anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
513         if (!atomic_inc_not_zero(&anon_vma->re    505         if (!atomic_inc_not_zero(&anon_vma->refcount)) {
514                 anon_vma = NULL;                  506                 anon_vma = NULL;
515                 goto out;                         507                 goto out;
516         }                                         508         }
517                                                   509 
518         /*                                        510         /*
519          * If this folio is still mapped, then    511          * If this folio is still mapped, then its anon_vma cannot have been
520          * freed.  But if it has been unmapped    512          * freed.  But if it has been unmapped, we have no security against the
521          * anon_vma structure being freed and     513          * anon_vma structure being freed and reused (for another anon_vma:
522          * SLAB_TYPESAFE_BY_RCU guarantees tha    514          * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
523          * above cannot corrupt).                 515          * above cannot corrupt).
524          */                                       516          */
525         if (!folio_mapped(folio)) {               517         if (!folio_mapped(folio)) {
526                 rcu_read_unlock();                518                 rcu_read_unlock();
527                 put_anon_vma(anon_vma);           519                 put_anon_vma(anon_vma);
528                 return NULL;                      520                 return NULL;
529         }                                         521         }
530 out:                                              522 out:
531         rcu_read_unlock();                        523         rcu_read_unlock();
532                                                   524 
533         return anon_vma;                          525         return anon_vma;
534 }                                                 526 }
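
What folio_get_anon_vma() is decoding here: an anonymous folio stores a pointer to its anon_vma in folio->mapping, tagged with the low PAGE_MAPPING_ANON bit. A rough sketch of both directions (the encode side is roughly what __folio_set_anon() does further down in this file, __page_set_anon_rmap() in the 6.2 column):

	/* Encode: tag the anon_vma pointer and publish it in ->mapping. */
	unsigned long anon_mapping = (unsigned long)anon_vma + PAGE_MAPPING_ANON;
	WRITE_ONCE(folio->mapping, (struct address_space *)anon_mapping);

	/* Decode, as in folio_get_anon_vma(): check the tag, then strip it. */
	if ((anon_mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON)
		anon_vma = (struct anon_vma *)(anon_mapping - PAGE_MAPPING_ANON);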
535                                                   527 
536 /*                                                528 /*
537  * Similar to folio_get_anon_vma() except it l    529  * Similar to folio_get_anon_vma() except it locks the anon_vma.
538  *                                                530  *
539  * Its a little more complex as it tries to ke    531  * Its a little more complex as it tries to keep the fast path to a single
540  * atomic op -- the trylock. If we fail the tr    532  * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
541  * reference like with folio_get_anon_vma() an    533  * reference like with folio_get_anon_vma() and then block on the mutex
542  * on !rwc->try_lock case.                        534  * on !rwc->try_lock case.
543  */                                               535  */
544 struct anon_vma *folio_lock_anon_vma_read(stru    536 struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
545                                           stru    537                                           struct rmap_walk_control *rwc)
546 {                                                 538 {
547         struct anon_vma *anon_vma = NULL;         539         struct anon_vma *anon_vma = NULL;
548         struct anon_vma *root_anon_vma;           540         struct anon_vma *root_anon_vma;
549         unsigned long anon_mapping;               541         unsigned long anon_mapping;
550                                                   542 
551 retry:                                         << 
552         rcu_read_lock();                          543         rcu_read_lock();
553         anon_mapping = (unsigned long)READ_ONC    544         anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
554         if ((anon_mapping & PAGE_MAPPING_FLAGS    545         if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
555                 goto out;                         546                 goto out;
556         if (!folio_mapped(folio))                 547         if (!folio_mapped(folio))
557                 goto out;                         548                 goto out;
558                                                   549 
559         anon_vma = (struct anon_vma *) (anon_m    550         anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
560         root_anon_vma = READ_ONCE(anon_vma->ro    551         root_anon_vma = READ_ONCE(anon_vma->root);
561         if (down_read_trylock(&root_anon_vma->    552         if (down_read_trylock(&root_anon_vma->rwsem)) {
562                 /*                                553                 /*
 563                  * folio_move_anon_rmap() might have changed the anon_vma as we <<
 564                  * might not hold the folio lock here.                           <<
 565                  */                            << 
 566                 if (unlikely((unsigned long)READ_ONCE(folio->mapping) !=        <<
 567                              anon_mapping)) {  << 
 568                         up_read(&root_anon_vma->rwsem);                          <<
569                         rcu_read_unlock();     << 
570                         goto retry;            << 
571                 }                              << 
572                                                << 
573                 /*                             << 
574                  * If the folio is still mappe    554                  * If the folio is still mapped, then this anon_vma is still
575                  * its anon_vma, and holding t    555                  * its anon_vma, and holding the mutex ensures that it will
576                  * not go away, see anon_vma_f    556                  * not go away, see anon_vma_free().
577                  */                               557                  */
578                 if (!folio_mapped(folio)) {       558                 if (!folio_mapped(folio)) {
579                         up_read(&root_anon_vma    559                         up_read(&root_anon_vma->rwsem);
580                         anon_vma = NULL;          560                         anon_vma = NULL;
581                 }                                 561                 }
582                 goto out;                         562                 goto out;
583         }                                         563         }
584                                                   564 
585         if (rwc && rwc->try_lock) {               565         if (rwc && rwc->try_lock) {
586                 anon_vma = NULL;                  566                 anon_vma = NULL;
587                 rwc->contended = true;            567                 rwc->contended = true;
588                 goto out;                         568                 goto out;
589         }                                         569         }
590                                                   570 
591         /* trylock failed, we got to sleep */     571         /* trylock failed, we got to sleep */
592         if (!atomic_inc_not_zero(&anon_vma->re    572         if (!atomic_inc_not_zero(&anon_vma->refcount)) {
593                 anon_vma = NULL;                  573                 anon_vma = NULL;
594                 goto out;                         574                 goto out;
595         }                                         575         }
596                                                   576 
597         if (!folio_mapped(folio)) {               577         if (!folio_mapped(folio)) {
598                 rcu_read_unlock();                578                 rcu_read_unlock();
599                 put_anon_vma(anon_vma);           579                 put_anon_vma(anon_vma);
600                 return NULL;                      580                 return NULL;
601         }                                         581         }
602                                                   582 
603         /* we pinned the anon_vma, it's safe t    583         /* we pinned the anon_vma, it's safe to sleep */
604         rcu_read_unlock();                        584         rcu_read_unlock();
605         anon_vma_lock_read(anon_vma);             585         anon_vma_lock_read(anon_vma);
606                                                   586 
607         /*                                     << 
608          * folio_move_anon_rmap() might have c << 
609          * not hold the folio lock here.       << 
610          */                                    << 
611         if (unlikely((unsigned long)READ_ONCE( << 
612                      anon_mapping)) {          << 
613                 anon_vma_unlock_read(anon_vma) << 
614                 put_anon_vma(anon_vma);        << 
615                 anon_vma = NULL;               << 
616                 goto retry;                    << 
617         }                                      << 
618                                                << 
619         if (atomic_dec_and_test(&anon_vma->ref    587         if (atomic_dec_and_test(&anon_vma->refcount)) {
620                 /*                                588                 /*
621                  * Oops, we held the last refc    589                  * Oops, we held the last refcount, release the lock
622                  * and bail -- can't simply us    590                  * and bail -- can't simply use put_anon_vma() because
623                  * we'll deadlock on the anon_    591                  * we'll deadlock on the anon_vma_lock_write() recursion.
624                  */                               592                  */
625                 anon_vma_unlock_read(anon_vma)    593                 anon_vma_unlock_read(anon_vma);
626                 __put_anon_vma(anon_vma);         594                 __put_anon_vma(anon_vma);
627                 anon_vma = NULL;                  595                 anon_vma = NULL;
628         }                                         596         }
629                                                   597 
630         return anon_vma;                          598         return anon_vma;
631                                                   599 
632 out:                                              600 out:
633         rcu_read_unlock();                        601         rcu_read_unlock();
634         return anon_vma;                          602         return anon_vma;
635 }                                                 603 }
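
The function above is the trylock-then-pin idiom: under RCU it first attempts a
non-blocking read lock on the root anon_vma, and only if that fails does it take
a reference with atomic_inc_not_zero(), which lets it leave the RCU section and
sleep on the lock without the anon_vma being freed underneath it. Below is a
minimal userspace sketch of the same idiom using pthreads and C11 atomics; the
names (struct object, object_lock_read) are invented for illustration, and the
RCU protection that makes the kernel's fast path safe is simply omitted.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct object {
	pthread_rwlock_t lock;		/* stands in for anon_vma->root->rwsem */
	atomic_int refcount;		/* stands in for anon_vma->refcount    */
};

/* Return the object read-locked, or NULL if it is already being freed. */
static struct object *object_lock_read(struct object *obj)
{
	/* Fast path: non-blocking attempt, like down_read_trylock(). */
	if (pthread_rwlock_tryrdlock(&obj->lock) == 0)
		return obj;

	/* Slow path: pin the object first (the atomic_inc_not_zero step)... */
	int old = atomic_load(&obj->refcount);
	do {
		if (old == 0)
			return NULL;	/* refcount hit zero: object is dying */
	} while (!atomic_compare_exchange_weak(&obj->refcount, &old, old + 1));

	/* ...only then is it safe to block waiting for the lock. */
	pthread_rwlock_rdlock(&obj->lock);
	return obj;
}

int main(void)
{
	static struct object obj = {
		.lock = PTHREAD_RWLOCK_INITIALIZER,
		.refcount = 1,
	};

	if (object_lock_read(&obj))
		pthread_rwlock_unlock(&obj.lock);
	return 0;
}
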
636                                                   604 
637 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUS    605 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
638 /*                                                606 /*
639  * Flush TLB entries for recently unmapped pag    607  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
640  * important if a PTE was dirty when it was un    608  * important if a PTE was dirty when it was unmapped that it's flushed
641  * before any IO is initiated on the page to p    609  * before any IO is initiated on the page to prevent lost writes. Similarly,
642  * it must be flushed before freeing to preven    610  * it must be flushed before freeing to prevent data leakage.
643  */                                               611  */
644 void try_to_unmap_flush(void)                     612 void try_to_unmap_flush(void)
645 {                                                 613 {
646         struct tlbflush_unmap_batch *tlb_ubc =    614         struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
647                                                   615 
648         if (!tlb_ubc->flush_required)             616         if (!tlb_ubc->flush_required)
649                 return;                           617                 return;
650                                                   618 
651         arch_tlbbatch_flush(&tlb_ubc->arch);      619         arch_tlbbatch_flush(&tlb_ubc->arch);
652         tlb_ubc->flush_required = false;          620         tlb_ubc->flush_required = false;
653         tlb_ubc->writable = false;                621         tlb_ubc->writable = false;
654 }                                                 622 }
655                                                   623 
656 /* Flush iff there are potentially writable TL    624 /* Flush iff there are potentially writable TLB entries that can race with IO */
657 void try_to_unmap_flush_dirty(void)               625 void try_to_unmap_flush_dirty(void)
658 {                                                 626 {
659         struct tlbflush_unmap_batch *tlb_ubc =    627         struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
660                                                   628 
661         if (tlb_ubc->writable)                    629         if (tlb_ubc->writable)
662                 try_to_unmap_flush();             630                 try_to_unmap_flush();
663 }                                                 631 }
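
A note on the two flags the helpers above manage: flush_required records that
some batched flush is owed at all, while writable records that at least one of
the deferred entries was dirty and therefore possibly still writable, so the
flush must happen before the page is handed to IO. A small self-contained model
of that pairing, with invented names (struct batch, flush, flush_dirty); the
actual queuing of the page for IO is only implied.

#include <stdbool.h>
#include <stdio.h>

struct batch {
	bool flush_required;	/* some unmapped PTEs have not been flushed yet */
	bool writable;		/* ...and at least one of them was writable     */
};

static void flush(struct batch *b)
{
	if (!b->flush_required)
		return;
	puts("flushing remote TLBs");	/* stand-in for arch_tlbbatch_flush() */
	b->flush_required = false;
	b->writable = false;
}

static void flush_dirty(struct batch *b)
{
	/* Only pay for the flush when a writable entry could race with IO. */
	if (b->writable)
		flush(b);
}

int main(void)
{
	struct batch b = { .flush_required = true, .writable = true };

	flush_dirty(&b);	/* must run before the page is queued for IO */
	flush_dirty(&b);	/* second call is now a no-op */
	return 0;
}
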
664                                                   632 
665 /*                                                633 /*
666  * Bits 0-14 of mm->tlb_flush_batched record p    634  * Bits 0-14 of mm->tlb_flush_batched record pending generations.
667  * Bits 16-30 of mm->tlb_flush_batched bit rec    635  * Bits 16-30 of mm->tlb_flush_batched bit record flushed generations.
668  */                                               636  */
669 #define TLB_FLUSH_BATCH_FLUSHED_SHIFT   16        637 #define TLB_FLUSH_BATCH_FLUSHED_SHIFT   16
670 #define TLB_FLUSH_BATCH_PENDING_MASK              638 #define TLB_FLUSH_BATCH_PENDING_MASK                    \
671         ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT     639         ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
672 #define TLB_FLUSH_BATCH_PENDING_LARGE             640 #define TLB_FLUSH_BATCH_PENDING_LARGE                   \
673         (TLB_FLUSH_BATCH_PENDING_MASK / 2)        641         (TLB_FLUSH_BATCH_PENDING_MASK / 2)
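
Both generation counters therefore share a single integer: bits 0-14 hold the
number of batches queued ("pending") and bits 16-30 the number already flushed,
so one atomic read is enough to compare the two. A tiny standalone demonstration
of that packing, reusing the same shift and mask values as the macros above:

#include <stdio.h>

#define FLUSHED_SHIFT	16
#define PENDING_MASK	((1 << (FLUSHED_SHIFT - 1)) - 1)	/* bits 0-14 */

int main(void)
{
	int batch = 0;

	/* Three unmap batches queued so far, only one flush completed. */
	batch += 3;			/* pending += 3 */
	batch += 1 << FLUSHED_SHIFT;	/* flushed += 1 */

	printf("pending=%d flushed=%d stale=%s\n",
	       batch & PENDING_MASK,
	       batch >> FLUSHED_SHIFT,
	       (batch & PENDING_MASK) != (batch >> FLUSHED_SHIFT) ? "yes" : "no");
	return 0;
}
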
674                                                   642 
675 static void set_tlb_ubc_flush_pending(struct m !! 643 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
676                                       unsigned << 
677 {                                                 644 {
678         struct tlbflush_unmap_batch *tlb_ubc =    645         struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
679         int batch;                             !! 646         int batch, nbatch;
680         bool writable = pte_dirty(pteval);     << 
681                                                << 
682         if (!pte_accessible(mm, pteval))       << 
683                 return;                        << 
684                                                   647 
685         arch_tlbbatch_add_pending(&tlb_ubc->ar !! 648         arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
686         tlb_ubc->flush_required = true;           649         tlb_ubc->flush_required = true;
687                                                   650 
688         /*                                        651         /*
689          * Ensure compiler does not re-order t    652          * Ensure compiler does not re-order the setting of tlb_flush_batched
690          * before the PTE is cleared.             653          * before the PTE is cleared.
691          */                                       654          */
692         barrier();                                655         barrier();
693         batch = atomic_read(&mm->tlb_flush_bat    656         batch = atomic_read(&mm->tlb_flush_batched);
694 retry:                                            657 retry:
695         if ((batch & TLB_FLUSH_BATCH_PENDING_M    658         if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
696                 /*                                659                 /*
697                  * Prevent `pending' from catc    660                  * Prevent `pending' from catching up with `flushed' because of
698                  * overflow.  Reset `pending'     661                  * overflow.  Reset `pending' and `flushed' to be 1 and 0 if
699                  * `pending' becomes large.       662                  * `pending' becomes large.
700                  */                               663                  */
701                 if (!atomic_try_cmpxchg(&mm->t !! 664                 nbatch = atomic_cmpxchg(&mm->tlb_flush_batched, batch, 1);
                                                   >> 665                 if (nbatch != batch) {
                                                   >> 666                         batch = nbatch;
702                         goto retry;               667                         goto retry;
                                                   >> 668                 }
703         } else {                                  669         } else {
704                 atomic_inc(&mm->tlb_flush_batc    670                 atomic_inc(&mm->tlb_flush_batched);
705         }                                         671         }
706                                                   672 
707         /*                                        673         /*
708          * If the PTE was dirty then it's best    674          * If the PTE was dirty then it's best to assume it's writable. The
709          * caller must use try_to_unmap_flush_    675          * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
710          * before the page is queued for IO.      676          * before the page is queued for IO.
711          */                                       677          */
712         if (writable)                             678         if (writable)
713                 tlb_ubc->writable = true;         679                 tlb_ubc->writable = true;
714 }                                                 680 }
715                                                   681 
716 /*                                                682 /*
717  * Returns true if the TLB flush should be def    683  * Returns true if the TLB flush should be deferred to the end of a batch of
718  * unmap operations to reduce IPIs.               684  * unmap operations to reduce IPIs.
719  */                                               685  */
720 static bool should_defer_flush(struct mm_struc    686 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
721 {                                                 687 {
                                                   >> 688         bool should_defer = false;
                                                   >> 689 
722         if (!(flags & TTU_BATCH_FLUSH))           690         if (!(flags & TTU_BATCH_FLUSH))
723                 return false;                     691                 return false;
724                                                   692 
725         return arch_tlbbatch_should_defer(mm); !! 693         /* If remote CPUs need to be flushed then defer batch the flush */
                                                   >> 694         if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
                                                   >> 695                 should_defer = true;
                                                   >> 696         put_cpu();
                                                   >> 697 
                                                   >> 698         return should_defer;
726 }                                                 699 }
727                                                   700 
728 /*                                                701 /*
729  * Reclaim unmaps pages under the PTL but do n    702  * Reclaim unmaps pages under the PTL but do not flush the TLB prior to
730  * releasing the PTL if TLB flushes are batche    703  * releasing the PTL if TLB flushes are batched. It's possible for a parallel
731  * operation such as mprotect or munmap to rac    704  * operation such as mprotect or munmap to race between reclaim unmapping
732  * the page and flushing the page. If this rac    705  * the page and flushing the page. If this race occurs, it potentially allows
733  * access to data via a stale TLB entry. Track    706  * access to data via a stale TLB entry. Tracking all mm's that have TLB
734  * batching in flight would be expensive durin    707  * batching in flight would be expensive during reclaim so instead track
735  * whether TLB batching occurred in the past a    708  * whether TLB batching occurred in the past and if so then do a flush here
736  * if required. This will cost one additional     709  * if required. This will cost one additional flush per reclaim cycle paid
737  * by the first operation at risk such as mpro    710  * by the first operation at risk such as mprotect and munmap.
738  *                                                711  *
739  * This must be called under the PTL so that a    712  * This must be called under the PTL so that an access to tlb_flush_batched
740  * that is potentially a "reclaim vs mprotect/    713  * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
741  * via the PTL.                                   714  * via the PTL.
742  */                                               715  */
743 void flush_tlb_batched_pending(struct mm_struc    716 void flush_tlb_batched_pending(struct mm_struct *mm)
744 {                                                 717 {
745         int batch = atomic_read(&mm->tlb_flush    718         int batch = atomic_read(&mm->tlb_flush_batched);
746         int pending = batch & TLB_FLUSH_BATCH_    719         int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
747         int flushed = batch >> TLB_FLUSH_BATCH    720         int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;
748                                                   721 
749         if (pending != flushed) {                 722         if (pending != flushed) {
750                 arch_flush_tlb_batched_pending !! 723                 flush_tlb_mm(mm);
751                 /*                                724                 /*
752                  * If the new TLB flushing is     725                  * If the new TLB flushing is pending during flushing, leave
753                  * mm->tlb_flush_batched as is    726                  * mm->tlb_flush_batched as is, to avoid losing flushing.
754                  */                               727                  */
755                 atomic_cmpxchg(&mm->tlb_flush_    728                 atomic_cmpxchg(&mm->tlb_flush_batched, batch,
756                                pending | (pend    729                                pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
757         }                                         730         }
758 }                                                 731 }
759 #else                                             732 #else
760 static void set_tlb_ubc_flush_pending(struct m !! 733 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
761                                       unsigned << 
762 {                                                 734 {
763 }                                                 735 }
764                                                   736 
765 static bool should_defer_flush(struct mm_struc    737 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
766 {                                                 738 {
767         return false;                             739         return false;
768 }                                                 740 }
769 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_F    741 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
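
For the batched case above, flush_tlb_batched_pending() closes the race by
comparing the two generations under the PTL: if any batch is still outstanding
it flushes the whole mm, then tries to advance "flushed" up to the sampled
"pending" with a cmpxchg, so a batch queued concurrently is deliberately left
looking unflushed. A compressed userspace model of that update, assuming the
same bit layout as the macros earlier; flush_mm() is only a stand-in for the
architecture flush.

#include <stdatomic.h>
#include <stdio.h>

#define FLUSHED_SHIFT	16
#define PENDING_MASK	((1 << (FLUSHED_SHIFT - 1)) - 1)

static atomic_int tlb_flush_batched;

static void flush_mm(void) { puts("flushing the whole mm"); }

static void flush_batched_pending(void)
{
	int batch = atomic_load(&tlb_flush_batched);
	int pending = batch & PENDING_MASK;
	int flushed = batch >> FLUSHED_SHIFT;

	if (pending != flushed) {
		flush_mm();
		/*
		 * Mark these batches flushed only if nothing new was queued
		 * since 'batch' was sampled; otherwise leave the counter so
		 * the newer batch still reads as unflushed.
		 */
		atomic_compare_exchange_strong(&tlb_flush_batched, &batch,
				pending | (pending << FLUSHED_SHIFT));
	}
}

int main(void)
{
	atomic_fetch_add(&tlb_flush_batched, 2);	/* two batches queued */
	flush_batched_pending();			/* flushes once */
	flush_batched_pending();			/* now a no-op */
	return 0;
}
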
770                                                   742 
771 /*                                                743 /*
772  * At what user virtual address is page expect    744  * At what user virtual address is page expected in vma?
773  * Caller should check the page is actually pa    745  * Caller should check the page is actually part of the vma.
774  */                                               746  */
775 unsigned long page_address_in_vma(struct page     747 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
776 {                                                 748 {
777         struct folio *folio = page_folio(page)    749         struct folio *folio = page_folio(page);
778         pgoff_t pgoff;                         << 
779                                                << 
780         if (folio_test_anon(folio)) {             750         if (folio_test_anon(folio)) {
781                 struct anon_vma *page__anon_vm    751                 struct anon_vma *page__anon_vma = folio_anon_vma(folio);
782                 /*                                752                 /*
783                  * Note: swapoff's unuse_vma()    753                  * Note: swapoff's unuse_vma() is more efficient with this
784                  * check, and needs it to matc    754                  * check, and needs it to match anon_vma when KSM is active.
785                  */                               755                  */
786                 if (!vma->anon_vma || !page__a    756                 if (!vma->anon_vma || !page__anon_vma ||
787                     vma->anon_vma->root != pag    757                     vma->anon_vma->root != page__anon_vma->root)
788                         return -EFAULT;           758                         return -EFAULT;
789         } else if (!vma->vm_file) {               759         } else if (!vma->vm_file) {
790                 return -EFAULT;                   760                 return -EFAULT;
791         } else if (vma->vm_file->f_mapping !=     761         } else if (vma->vm_file->f_mapping != folio->mapping) {
792                 return -EFAULT;                   762                 return -EFAULT;
793         }                                         763         }
794                                                   764 
795         /* The !page__anon_vma above handles K !! 765         return vma_address(page, vma);
796         pgoff = folio->index + folio_page_idx( << 
797         return vma_address(vma, pgoff, 1);     << 
798 }                                                 766 }
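
After the ownership checks, the address is derived purely from offsets: the
page's offset within the file (or anon region), minus the offset at which the
VMA begins, scaled to bytes and added to the VMA's start. vma_address() itself
is not shown in this excerpt, so the sketch below only spells out that
conventional linear-VMA arithmetic with field names borrowed for illustration:

#include <stdio.h>

#define PAGE_SHIFT	12

struct vma {
	unsigned long vm_start;		/* first user address covered        */
	unsigned long vm_end;		/* one past the last address covered */
	unsigned long vm_pgoff;		/* file offset of vm_start, in pages */
};

/* Expected user address of the page at file offset 'pgoff', or 0 if outside. */
static unsigned long address_in_vma(const struct vma *vma, unsigned long pgoff)
{
	unsigned long addr;

	if (pgoff < vma->vm_pgoff)
		return 0;
	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	return (addr < vma->vm_end) ? addr : 0;
}

int main(void)
{
	struct vma vma = { 0x7f0000000000UL, 0x7f0000010000UL, 8 };

	/* Page 10 of the file sits two pages into this mapping. */
	printf("%#lx\n", address_in_vma(&vma, 10));
	return 0;
}
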
799                                                   767 
800 /*                                                768 /*
801  * Returns the actual pmd_t* where we expect '    769  * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
802  * NULL if it doesn't exist.  No guarantees /     770  * NULL if it doesn't exist.  No guarantees / checks on what the pmd_t*
803  * represents.                                    771  * represents.
804  */                                               772  */
805 pmd_t *mm_find_pmd(struct mm_struct *mm, unsig    773 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
806 {                                                 774 {
807         pgd_t *pgd;                               775         pgd_t *pgd;
808         p4d_t *p4d;                               776         p4d_t *p4d;
809         pud_t *pud;                               777         pud_t *pud;
810         pmd_t *pmd = NULL;                        778         pmd_t *pmd = NULL;
811                                                   779 
812         pgd = pgd_offset(mm, address);            780         pgd = pgd_offset(mm, address);
813         if (!pgd_present(*pgd))                   781         if (!pgd_present(*pgd))
814                 goto out;                         782                 goto out;
815                                                   783 
816         p4d = p4d_offset(pgd, address);           784         p4d = p4d_offset(pgd, address);
817         if (!p4d_present(*p4d))                   785         if (!p4d_present(*p4d))
818                 goto out;                         786                 goto out;
819                                                   787 
820         pud = pud_offset(p4d, address);           788         pud = pud_offset(p4d, address);
821         if (!pud_present(*pud))                   789         if (!pud_present(*pud))
822                 goto out;                         790                 goto out;
823                                                   791 
824         pmd = pmd_offset(pud, address);           792         pmd = pmd_offset(pud, address);
825 out:                                              793 out:
826         return pmd;                               794         return pmd;
827 }                                                 795 }
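
mm_find_pmd() is a straight top-down descent: each level is derived from the
previous one and the walk bails out at the first level that is not present,
leaving pmd as NULL (or, if every upper level is present, as a pointer whose
contents the caller must still check). The same descent-with-early-exit shape
in a generic, self-contained form, using a toy fixed-fanout table rather than
real page tables:

#include <stdio.h>
#include <stdlib.h>

#define FANOUT 8

struct node { struct node *slot[FANOUT]; };

/* Walk one index per level; stop at the first absent entry. */
static struct node *lookup(struct node *top, const int *idx, int levels)
{
	struct node *cur = top;

	for (int i = 0; cur && i < levels; i++)
		cur = cur->slot[idx[i]];
	return cur;		/* may be NULL, as mm_find_pmd()'s result may be */
}

int main(void)
{
	struct node *top = calloc(1, sizeof(*top));
	struct node *mid = calloc(1, sizeof(*mid));
	struct node *leaf = calloc(1, sizeof(*leaf));
	int path_hit[]  = { 3, 0 };	/* both levels populated below      */
	int path_miss[] = { 5, 0 };	/* top->slot[5] was never populated */

	top->slot[3] = mid;
	mid->slot[0] = leaf;
	printf("hit=%p miss=%p\n", (void *)lookup(top, path_hit, 2),
				   (void *)lookup(top, path_miss, 2));
	free(leaf);
	free(mid);
	free(top);
	return 0;
}
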
828                                                   796 
829 struct folio_referenced_arg {                     797 struct folio_referenced_arg {
830         int mapcount;                             798         int mapcount;
831         int referenced;                           799         int referenced;
832         unsigned long vm_flags;                   800         unsigned long vm_flags;
833         struct mem_cgroup *memcg;                 801         struct mem_cgroup *memcg;
834 };                                                802 };
835                                                << 
836 /*                                                803 /*
837  * arg: folio_referenced_arg will be passed       804  * arg: folio_referenced_arg will be passed
838  */                                               805  */
839 static bool folio_referenced_one(struct folio     806 static bool folio_referenced_one(struct folio *folio,
840                 struct vm_area_struct *vma, un    807                 struct vm_area_struct *vma, unsigned long address, void *arg)
841 {                                                 808 {
842         struct folio_referenced_arg *pra = arg    809         struct folio_referenced_arg *pra = arg;
843         DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma    810         DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
844         int referenced = 0;                       811         int referenced = 0;
845         unsigned long start = address, ptes =  << 
846                                                   812 
847         while (page_vma_mapped_walk(&pvmw)) {     813         while (page_vma_mapped_walk(&pvmw)) {
848                 address = pvmw.address;           814                 address = pvmw.address;
849                                                   815 
850                 if (vma->vm_flags & VM_LOCKED) !! 816                 if ((vma->vm_flags & VM_LOCKED) &&
851                         if (!folio_test_large( !! 817                     (!folio_test_large(folio) || !pvmw.pte)) {
852                                 /* Restore the !! 818                         /* Restore the mlock which got missed */
853                                 mlock_vma_foli !! 819                         mlock_vma_folio(folio, vma, !pvmw.pte);
854                                 page_vma_mappe << 
855                                 pra->vm_flags  << 
856                                 return false;  << 
857                         }                      << 
858                         /*                     << 
859                          * For large folio ful << 
860                          * be handled after th << 
861                          *                     << 
862                          * For large folio cro << 
863                          * expected to be pick << 
864                          * should skip referen << 
865                          * the range of VM_LOC << 
866                          * should just count t << 
867                          * the range of VM_LOC << 
868                          */                    << 
869                         ptes++;                << 
870                         pra->mapcount--;       << 
871                         continue;              << 
872                 }                              << 
873                                                << 
874                 /*                             << 
875                  * Skip the non-shared swapbac << 
876                  * the exiting or OOM-reaped p << 
877                  * swap-out followed by an imm << 
878                  */                            << 
879                 if ((!atomic_read(&vma->vm_mm- << 
880                     check_stable_address_space << 
881                     folio_test_anon(folio) &&  << 
882                     !folio_likely_mapped_share << 
883                         pra->referenced = -1;  << 
884                         page_vma_mapped_walk_d    820                         page_vma_mapped_walk_done(&pvmw);
885                         return false;          !! 821                         pra->vm_flags |= VM_LOCKED;
                                                   >> 822                         return false; /* To break the loop */
886                 }                                 823                 }
887                                                   824 
888                 if (lru_gen_enabled() && pvmw. !! 825                 if (pvmw.pte) {
889                         if (lru_gen_look_aroun !! 826                         if (lru_gen_enabled() && pte_young(*pvmw.pte) &&
                                                   >> 827                             !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))) {
                                                   >> 828                                 lru_gen_look_around(&pvmw);
890                                 referenced++;     829                                 referenced++;
891                 } else if (pvmw.pte) {         !! 830                         }
                                                   >> 831 
892                         if (ptep_clear_flush_y    832                         if (ptep_clear_flush_young_notify(vma, address,
893                                                !! 833                                                 pvmw.pte)) {
894                                 referenced++;  !! 834                                 /*
                                                   >> 835                                  * Don't treat a reference through
                                                   >> 836                                  * a sequentially read mapping as such.
                                                   >> 837                                  * If the folio has been used in another mapping,
                                                   >> 838                                  * we will catch it; if this other mapping is
                                                   >> 839                                  * already gone, the unmap path will have set
                                                   >> 840                                  * the referenced flag or activated the folio.
                                                   >> 841                                  */
                                                   >> 842                                 if (likely(!(vma->vm_flags & VM_SEQ_READ)))
                                                   >> 843                                         referenced++;
                                                   >> 844                         }
895                 } else if (IS_ENABLED(CONFIG_T    845                 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
896                         if (pmdp_clear_flush_y    846                         if (pmdp_clear_flush_young_notify(vma, address,
897                                                   847                                                 pvmw.pmd))
898                                 referenced++;     848                                 referenced++;
899                 } else {                          849                 } else {
900                         /* unexpected pmd-mapp    850                         /* unexpected pmd-mapped folio? */
901                         WARN_ON_ONCE(1);          851                         WARN_ON_ONCE(1);
902                 }                                 852                 }
903                                                   853 
904                 pra->mapcount--;                  854                 pra->mapcount--;
905         }                                         855         }
906                                                   856 
907         if ((vma->vm_flags & VM_LOCKED) &&     << 
908                         folio_test_large(folio << 
909                         folio_within_vma(folio << 
910                 unsigned long s_align, e_align << 
911                                                << 
912                 s_align = ALIGN_DOWN(start, PM << 
913                 e_align = ALIGN_DOWN(start + f << 
914                                                << 
915                 /* folio doesn't cross page ta << 
916                 if ((s_align == e_align) && (p << 
917                         /* Restore the mlock w << 
918                         mlock_vma_folio(folio, << 
919                         pra->vm_flags |= VM_LO << 
920                         return false; /* To br << 
921                 }                              << 
922         }                                      << 
923                                                << 
924         if (referenced)                           857         if (referenced)
925                 folio_clear_idle(folio);          858                 folio_clear_idle(folio);
926         if (folio_test_clear_young(folio))        859         if (folio_test_clear_young(folio))
927                 referenced++;                     860                 referenced++;
928                                                   861 
929         if (referenced) {                         862         if (referenced) {
930                 pra->referenced++;                863                 pra->referenced++;
931                 pra->vm_flags |= vma->vm_flags    864                 pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
932         }                                         865         }
933                                                   866 
934         if (!pra->mapcount)                       867         if (!pra->mapcount)
935                 return false; /* To break the     868                 return false; /* To break the loop */
936                                                   869 
937         return true;                              870         return true;
938 }                                                 871 }
939                                                   872 
940 static bool invalid_folio_referenced_vma(struc    873 static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
941 {                                                 874 {
942         struct folio_referenced_arg *pra = arg    875         struct folio_referenced_arg *pra = arg;
943         struct mem_cgroup *memcg = pra->memcg;    876         struct mem_cgroup *memcg = pra->memcg;
944                                                   877 
945         /*                                     !! 878         if (!mm_match_cgroup(vma->vm_mm, memcg))
946          * Ignore references from this mapping << 
947          * folio has been used in another mapp << 
948          * other mapping is already gone, the  << 
949          * referenced flag or activated the fo << 
950          */                                    << 
951         if (!vma_has_recency(vma))             << 
952                 return true;                   << 
953                                                << 
954         /*                                     << 
955          * If we are reclaiming on behalf of a << 
956          * of references from different cgroup << 
957          */                                    << 
958         if (memcg && !mm_match_cgroup(vma->vm_ << 
959                 return true;                      879                 return true;
960                                                   880 
961         return false;                             881         return false;
962 }                                                 882 }
963                                                   883 
964 /**                                               884 /**
965  * folio_referenced() - Test if the folio was     885  * folio_referenced() - Test if the folio was referenced.
966  * @folio: The folio to test.                     886  * @folio: The folio to test.
967  * @is_locked: Caller holds lock on the folio.    887  * @is_locked: Caller holds lock on the folio.
968  * @memcg: target memory cgroup                   888  * @memcg: target memory cgroup
969  * @vm_flags: A combination of all the vma->vm    889  * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
970  *                                                890  *
971  * Quick test_and_clear_referenced for all map    891  * Quick test_and_clear_referenced for all mappings of a folio,
972  *                                                892  *
973  * Return: The number of mappings which refere    893  * Return: The number of mappings which referenced the folio. Return -1 if
974  * the function bailed out due to rmap lock co    894  * the function bailed out due to rmap lock contention.
975  */                                               895  */
976 int folio_referenced(struct folio *folio, int     896 int folio_referenced(struct folio *folio, int is_locked,
977                      struct mem_cgroup *memcg,    897                      struct mem_cgroup *memcg, unsigned long *vm_flags)
978 {                                                 898 {
979         bool we_locked = false;                !! 899         int we_locked = 0;
980         struct folio_referenced_arg pra = {       900         struct folio_referenced_arg pra = {
981                 .mapcount = folio_mapcount(fol    901                 .mapcount = folio_mapcount(folio),
982                 .memcg = memcg,                   902                 .memcg = memcg,
983         };                                        903         };
984         struct rmap_walk_control rwc = {          904         struct rmap_walk_control rwc = {
985                 .rmap_one = folio_referenced_o    905                 .rmap_one = folio_referenced_one,
986                 .arg = (void *)&pra,              906                 .arg = (void *)&pra,
987                 .anon_lock = folio_lock_anon_v    907                 .anon_lock = folio_lock_anon_vma_read,
988                 .try_lock = true,                 908                 .try_lock = true,
989                 .invalid_vma = invalid_folio_r << 
990         };                                        909         };
991                                                   910 
992         *vm_flags = 0;                            911         *vm_flags = 0;
993         if (!pra.mapcount)                        912         if (!pra.mapcount)
994                 return 0;                         913                 return 0;
995                                                   914 
996         if (!folio_raw_mapping(folio))            915         if (!folio_raw_mapping(folio))
997                 return 0;                         916                 return 0;
998                                                   917 
999         if (!is_locked && (!folio_test_anon(fo    918         if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
1000                 we_locked = folio_trylock(fol    919                 we_locked = folio_trylock(folio);
1001                 if (!we_locked)                  920                 if (!we_locked)
1002                         return 1;                921                         return 1;
1003         }                                        922         }
1004                                                  923 
                                                   >> 924         /*
                                                   >> 925          * If we are reclaiming on behalf of a cgroup, skip
                                                   >> 926          * counting on behalf of references from different
                                                   >> 927          * cgroups
                                                   >> 928          */
                                                   >> 929         if (memcg) {
                                                   >> 930                 rwc.invalid_vma = invalid_folio_referenced_vma;
                                                   >> 931         }
                                                   >> 932 
1005         rmap_walk(folio, &rwc);                  933         rmap_walk(folio, &rwc);
1006         *vm_flags = pra.vm_flags;                934         *vm_flags = pra.vm_flags;
1007                                                  935 
1008         if (we_locked)                           936         if (we_locked)
1009                 folio_unlock(folio);             937                 folio_unlock(folio);
1010                                                  938 
1011         return rwc.contended ? -1 : pra.refer    939         return rwc.contended ? -1 : pra.referenced;
1012 }                                                940 }
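
The return convention documented above has three cases a caller must separate:
0 (no mapping referenced the folio), a positive count of referencing mappings,
and -1 when the walk bailed out because the rmap lock was contended, in which
case the folio was not actually examined. A tiny sketch of a caller making that
three-way distinction; folio_referenced() is replaced by a stub so the snippet
stays self-contained.

#include <stdio.h>

/* Stub with the same return convention as folio_referenced(). */
static int folio_referenced_stub(void) { return -1; }

int main(void)
{
	int ref = folio_referenced_stub();

	if (ref == -1)
		puts("rmap lock contended: folio was not examined");
	else if (ref == 0)
		puts("no mapping referenced the folio");
	else
		printf("%d mapping(s) referenced the folio\n", ref);
	return 0;
}
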
1013                                                  941 
1014 static int page_vma_mkclean_one(struct page_v    942 static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
1015 {                                                943 {
1016         int cleaned = 0;                         944         int cleaned = 0;
1017         struct vm_area_struct *vma = pvmw->vm    945         struct vm_area_struct *vma = pvmw->vma;
1018         struct mmu_notifier_range range;         946         struct mmu_notifier_range range;
1019         unsigned long address = pvmw->address    947         unsigned long address = pvmw->address;
1020                                                  948 
1021         /*                                       949         /*
1022          * We have to assume the worst case i    950          * We have to assume the worst case, i.e. pmd for invalidation. Note that
1023          * the folio can not be freed from th    951          * the folio can not be freed from this function.
1024          */                                      952          */
1025         mmu_notifier_range_init(&range, MMU_N !! 953         mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1026                                 vma->vm_mm, a !! 954                                 0, vma, vma->vm_mm, address,
                                                   >> 955                                 vma_address_end(pvmw));
1027         mmu_notifier_invalidate_range_start(&    956         mmu_notifier_invalidate_range_start(&range);
1028                                                  957 
1029         while (page_vma_mapped_walk(pvmw)) {     958         while (page_vma_mapped_walk(pvmw)) {
1030                 int ret = 0;                     959                 int ret = 0;
1031                                                  960 
1032                 address = pvmw->address;         961                 address = pvmw->address;
1033                 if (pvmw->pte) {                 962                 if (pvmw->pte) {
                                                   >> 963                         pte_t entry;
1034                         pte_t *pte = pvmw->pt    964                         pte_t *pte = pvmw->pte;
1035                         pte_t entry = ptep_ge << 
1036                                                  965 
1037                         if (!pte_dirty(entry) !! 966                         if (!pte_dirty(*pte) && !pte_write(*pte))
1038                                 continue;        967                                 continue;
1039                                                  968 
1040                         flush_cache_page(vma, !! 969                         flush_cache_page(vma, address, pte_pfn(*pte));
1041                         entry = ptep_clear_fl    970                         entry = ptep_clear_flush(vma, address, pte);
1042                         entry = pte_wrprotect    971                         entry = pte_wrprotect(entry);
1043                         entry = pte_mkclean(e    972                         entry = pte_mkclean(entry);
1044                         set_pte_at(vma->vm_mm    973                         set_pte_at(vma->vm_mm, address, pte, entry);
1045                         ret = 1;                 974                         ret = 1;
1046                 } else {                         975                 } else {
1047 #ifdef CONFIG_TRANSPARENT_HUGEPAGE               976 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1048                         pmd_t *pmd = pvmw->pm    977                         pmd_t *pmd = pvmw->pmd;
1049                         pmd_t entry;             978                         pmd_t entry;
1050                                                  979 
1051                         if (!pmd_dirty(*pmd)     980                         if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
1052                                 continue;        981                                 continue;
1053                                                  982 
1054                         flush_cache_range(vma    983                         flush_cache_range(vma, address,
1055                                           add    984                                           address + HPAGE_PMD_SIZE);
1056                         entry = pmdp_invalida    985                         entry = pmdp_invalidate(vma, address, pmd);
1057                         entry = pmd_wrprotect    986                         entry = pmd_wrprotect(entry);
1058                         entry = pmd_mkclean(e    987                         entry = pmd_mkclean(entry);
1059                         set_pmd_at(vma->vm_mm    988                         set_pmd_at(vma->vm_mm, address, pmd, entry);
1060                         ret = 1;                 989                         ret = 1;
1061 #else                                            990 #else
1062                         /* unexpected pmd-map    991                         /* unexpected pmd-mapped folio? */
1063                         WARN_ON_ONCE(1);         992                         WARN_ON_ONCE(1);
1064 #endif                                           993 #endif
1065                 }                                994                 }
1066                                                  995 
                                                   >> 996                 /*
                                                   >> 997                  * No need to call mmu_notifier_invalidate_range() as we are
                                                   >> 998                  * downgrading page table protection not changing it to point
                                                   >> 999                  * to a new page.
                                                   >> 1000                  *
                                                   >> 1001                  * See Documentation/mm/mmu_notifier.rst
                                                   >> 1002                  */
1067                 if (ret)                         1003                 if (ret)
1068                         cleaned++;               1004                         cleaned++;
1069         }                                        1005         }
1070                                                  1006 
1071         mmu_notifier_invalidate_range_end(&ra    1007         mmu_notifier_invalidate_range_end(&range);
1072                                                  1008 
1073         return cleaned;                          1009         return cleaned;
1074 }                                                1010 }
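
For each mapping found, the loop above performs the same two-step downgrade at
PTE or PMD granularity: clear the dirty bit and drop write permission, so the
next store faults and the filesystem sees the page become dirty again. The bit
manipulation itself, isolated into a runnable form with made-up flag values
(real pte_t encodings are architecture-specific):

#include <stdio.h>

#define PTE_WRITE	0x2	/* hypothetical flag bits, not an arch layout */
#define PTE_DIRTY	0x4

static unsigned long pte_mkclean_wrprotect(unsigned long pte)
{
	if (!(pte & (PTE_DIRTY | PTE_WRITE)))
		return pte;		/* already clean and read-only: skip */
	return pte & ~(PTE_DIRTY | PTE_WRITE);
}

int main(void)
{
	unsigned long pte = 0x1 | PTE_WRITE | PTE_DIRTY;   /* present+write+dirty */

	printf("before %#lx after %#lx\n", pte, pte_mkclean_wrprotect(pte));
	return 0;
}
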
1075                                                  1011 
1076 static bool page_mkclean_one(struct folio *fo    1012 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
1077                              unsigned long ad    1013                              unsigned long address, void *arg)
1078 {                                                1014 {
1079         DEFINE_FOLIO_VMA_WALK(pvmw, folio, vm    1015         DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
1080         int *cleaned = arg;                      1016         int *cleaned = arg;
1081                                                  1017 
1082         *cleaned += page_vma_mkclean_one(&pvm    1018         *cleaned += page_vma_mkclean_one(&pvmw);
1083                                                  1019 
1084         return true;                             1020         return true;
1085 }                                                1021 }
1086                                                  1022 
1087 static bool invalid_mkclean_vma(struct vm_are    1023 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
1088 {                                                1024 {
1089         if (vma->vm_flags & VM_SHARED)           1025         if (vma->vm_flags & VM_SHARED)
1090                 return false;                    1026                 return false;
1091                                                  1027 
1092         return true;                             1028         return true;
1093 }                                                1029 }
1094                                                  1030 
1095 int folio_mkclean(struct folio *folio)           1031 int folio_mkclean(struct folio *folio)
1096 {                                                1032 {
1097         int cleaned = 0;                         1033         int cleaned = 0;
1098         struct address_space *mapping;           1034         struct address_space *mapping;
1099         struct rmap_walk_control rwc = {         1035         struct rmap_walk_control rwc = {
1100                 .arg = (void *)&cleaned,         1036                 .arg = (void *)&cleaned,
1101                 .rmap_one = page_mkclean_one,    1037                 .rmap_one = page_mkclean_one,
1102                 .invalid_vma = invalid_mkclea    1038                 .invalid_vma = invalid_mkclean_vma,
1103         };                                       1039         };
1104                                                  1040 
1105         BUG_ON(!folio_test_locked(folio));       1041         BUG_ON(!folio_test_locked(folio));
1106                                                  1042 
1107         if (!folio_mapped(folio))                1043         if (!folio_mapped(folio))
1108                 return 0;                        1044                 return 0;
1109                                                  1045 
1110         mapping = folio_mapping(folio);          1046         mapping = folio_mapping(folio);
1111         if (!mapping)                            1047         if (!mapping)
1112                 return 0;                        1048                 return 0;
1113                                                  1049 
1114         rmap_walk(folio, &rwc);                  1050         rmap_walk(folio, &rwc);
1115                                                  1051 
1116         return cleaned;                          1052         return cleaned;
1117 }                                                1053 }
1118 EXPORT_SYMBOL_GPL(folio_mkclean);                1054 EXPORT_SYMBOL_GPL(folio_mkclean);
1119                                                  1055 
1120 /**                                              1056 /**
1121  * pfn_mkclean_range - Cleans the PTEs (inclu    1057  * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of
1122  *                     [@pfn, @pfn + @nr_page    1058  *                     [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff)
1123  *                     within the @vma of sha    1059  *                     within the @vma of shared mappings. And since clean PTEs
1124  *                     should also be readonl    1060  *                     should also be readonly, it write-protects them too.
1125  * @pfn: start pfn.                              1061  * @pfn: start pfn.
1126  * @nr_pages: number of physically contiguous    1062  * @nr_pages: number of physically contiguous pages starting with @pfn.
1127  * @pgoff: page offset that the @pfn mapped w    1063  * @pgoff: page offset that the @pfn mapped with.
1128  * @vma: vma that @pfn mapped within.            1064  * @vma: vma that @pfn mapped within.
1129  *                                               1065  *
1130  * Returns the number of cleaned PTEs (includ    1066  * Returns the number of cleaned PTEs (including PMDs).
1131  */                                              1067  */
1132 int pfn_mkclean_range(unsigned long pfn, unsi    1068 int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
1133                       struct vm_area_struct *    1069                       struct vm_area_struct *vma)
1134 {                                                1070 {
1135         struct page_vma_mapped_walk pvmw = {     1071         struct page_vma_mapped_walk pvmw = {
1136                 .pfn            = pfn,           1072                 .pfn            = pfn,
1137                 .nr_pages       = nr_pages,      1073                 .nr_pages       = nr_pages,
1138                 .pgoff          = pgoff,         1074                 .pgoff          = pgoff,
1139                 .vma            = vma,           1075                 .vma            = vma,
1140                 .flags          = PVMW_SYNC,     1076                 .flags          = PVMW_SYNC,
1141         };                                       1077         };
1142                                                  1078 
1143         if (invalid_mkclean_vma(vma, NULL))      1079         if (invalid_mkclean_vma(vma, NULL))
1144                 return 0;                        1080                 return 0;
1145                                                  1081 
1146         pvmw.address = vma_address(vma, pgoff !! 1082         pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma);
1147         VM_BUG_ON_VMA(pvmw.address == -EFAULT    1083         VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);
1148                                                  1084 
1149         return page_vma_mkclean_one(&pvmw);      1085         return page_vma_mkclean_one(&pvmw);
1150 }                                                1086 }
1151                                                  1087 
1152 static __always_inline unsigned int __folio_a !! 1088 int total_compound_mapcount(struct page *head)
1153                 struct page *page, int nr_pag !! 1089 {
1154                 int *nr_pmdmapped)            !! 1090         int mapcount = head_compound_mapcount(head);
1155 {                                             !! 1091         int nr_subpages;
1156         atomic_t *mapped = &folio->_nr_pages_ !! 1092         int i;
1157         const int orig_nr_pages = nr_pages;   !! 1093 
1158         int first = 0, nr = 0;                !! 1094         /* In the common case, avoid the loop when no subpages mapped by PTE */
1159                                               !! 1095         if (head_subpages_mapcount(head) == 0)
1160         __folio_rmap_sanity_checks(folio, pag !! 1096                 return mapcount;
1161                                               !! 1097         /*
1162         switch (level) {                      !! 1098          * Add all the PTE mappings of those subpages mapped by PTE.
1163         case RMAP_LEVEL_PTE:                  !! 1099          * Limit the loop, knowing that only subpages_mapcount are mapped?
1164                 if (!folio_test_large(folio)) !! 1100          * Perhaps: given all the raciness, that may be a good or a bad idea.
1165                         nr = atomic_inc_and_t !! 1101          */
1166                         break;                !! 1102         nr_subpages = thp_nr_pages(head);
1167                 }                             !! 1103         for (i = 0; i < nr_subpages; i++)
1168                                               !! 1104                 mapcount += atomic_read(&head[i]._mapcount);
1169                 do {                          !! 1105 
1170                         first += atomic_inc_a !! 1106         /* But each of those _mapcounts was based on -1 */
1171                 } while (page++, --nr_pages > !! 1107         mapcount += nr_subpages;
1172                                               !! 1108         return mapcount;
1173                 if (first &&                  << 
1174                     atomic_add_return_relaxed << 
1175                         nr = first;           << 
1176                                               << 
1177                 atomic_add(orig_nr_pages, &fo << 
1178                 break;                        << 
1179         case RMAP_LEVEL_PMD:                  << 
1180                 first = atomic_inc_and_test(& << 
1181                 if (first) {                  << 
1182                         nr = atomic_add_retur << 
1183                         if (likely(nr < ENTIR << 
1184                                 *nr_pmdmapped << 
1185                                 nr = *nr_pmdm << 
1186                                 /* Raced ahea << 
1187                                 if (unlikely( << 
1188                                         nr =  << 
1189                         } else {              << 
1190                                 /* Raced ahea << 
1191                                 nr = 0;       << 
1192                         }                     << 
1193                 }                             << 
1194                 atomic_inc(&folio->_large_map << 
1195                 break;                        << 
1196         }                                     << 
1197         return nr;                            << 
1198 }                                                1109 }
1199                                                  1110 
1200 /**                                              1111 /**
1201  * folio_move_anon_rmap - move a folio to our !! 1112  * page_move_anon_rmap - move a page to our anon_vma
1202  * @folio:      The folio to move to our anon !! 1113  * @page:       the page to move to our anon_vma
1203  * @vma:        The vma the folio belongs to  !! 1114  * @vma:        the vma the page belongs to
1204  *                                            !! 1115  *
1205  * When a folio belongs exclusively to one pr !! 1116  * When a page belongs exclusively to one process after a COW event,
1206  * that folio can be moved into the anon_vma  !! 1117  * that page can be moved into the anon_vma that belongs to just that
1207  * process, so the rmap code will not search  !! 1118  * process, so the rmap code will not search the parent or sibling
                                                   >> 1119  * processes.
1208  */                                              1120  */
1209 void folio_move_anon_rmap(struct folio *folio !! 1121 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
1210 {                                                1122 {
1211         void *anon_vma = vma->anon_vma;          1123         void *anon_vma = vma->anon_vma;
                                                   >> 1124         struct folio *folio = page_folio(page);
1212                                                  1125 
1213         VM_BUG_ON_FOLIO(!folio_test_locked(fo    1126         VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1214         VM_BUG_ON_VMA(!anon_vma, vma);           1127         VM_BUG_ON_VMA(!anon_vma, vma);
1215                                                  1128 
1216         anon_vma += PAGE_MAPPING_ANON;           1129         anon_vma += PAGE_MAPPING_ANON;
1217         /*                                       1130         /*
1218          * Ensure that anon_vma and the PAGE_    1131          * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
1219          * simultaneously, so a concurrent re    1132          * simultaneously, so a concurrent reader (eg folio_referenced()'s
1220          * folio_test_anon()) will not see on    1133          * folio_test_anon()) will not see one without the other.
1221          */                                      1134          */
1222         WRITE_ONCE(folio->mapping, anon_vma);    1135         WRITE_ONCE(folio->mapping, anon_vma);
                                                   >> 1136         SetPageAnonExclusive(page);
1223 }                                                1137 }
1224                                                  1138 
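A minimal caller sketch (illustrative only, not taken from mm/memory.c or mm/huge_memory.c): after a COW fault has confirmed, under the folio lock, that the folio is owned by this process alone, a reuse path rebinds the rmap and, since the 6.12 version no longer sets PageAnonExclusive itself, marks the page exclusive explicitly. The function and variable names below are hypothetical.

static void wp_reuse_example(struct folio *folio, struct page *page,
			     struct vm_area_struct *vma)
{
	/* Folio is locked and known to be mapped exclusively by this process. */
	folio_move_anon_rmap(folio, vma);
	/* The 6.12 API leaves PageAnonExclusive to the caller. */
	SetPageAnonExclusive(page);
}
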
1225 /**                                              1139 /**
1226  * __folio_set_anon - set up a new anonymous  !! 1140  * __page_set_anon_rmap - set up new anonymous rmap
1227  * @folio:      The folio to set up the new a !! 1141  * @page:       Page or Hugepage to add to rmap
1228  * @vma:        VM area to add the folio to.  !! 1142  * @vma:        VM area to add page to.
1229  * @address:    User virtual address of the m !! 1143  * @address:    User virtual address of the mapping     
1230  * @exclusive:  Whether the folio is exclusiv !! 1144  * @exclusive:  the page is exclusively owned by the current process
1231  */                                              1145  */
1232 static void __folio_set_anon(struct folio *fo !! 1146 static void __page_set_anon_rmap(struct page *page,
1233                              unsigned long ad !! 1147         struct vm_area_struct *vma, unsigned long address, int exclusive)
1234 {                                                1148 {
1235         struct anon_vma *anon_vma = vma->anon    1149         struct anon_vma *anon_vma = vma->anon_vma;
1236                                                  1150 
1237         BUG_ON(!anon_vma);                       1151         BUG_ON(!anon_vma);
1238                                                  1152 
                                                   >> 1153         if (PageAnon(page))
                                                   >> 1154                 goto out;
                                                   >> 1155 
1239         /*                                       1156         /*
1240          * If the folio isn't exclusive to th !! 1157          * If the page isn't exclusively mapped into this vma,
1241          * possible anon_vma for the folio ma !! 1158          * we must use the _oldest_ possible anon_vma for the
                                                   >> 1159          * page mapping!
1242          */                                      1160          */
1243         if (!exclusive)                          1161         if (!exclusive)
1244                 anon_vma = anon_vma->root;       1162                 anon_vma = anon_vma->root;
1245                                                  1163 
1246         /*                                       1164         /*
1247          * page_idle does a lockless/optimist !! 1165          * page_idle does a lockless/optimistic rmap scan on page->mapping.
1248          * Make sure the compiler doesn't spl    1166          * Make sure the compiler doesn't split the stores of anon_vma and
1249          * the PAGE_MAPPING_ANON type identif    1167          * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
1250          * could mistake the mapping for a st    1168          * could mistake the mapping for a struct address_space and crash.
1251          */                                      1169          */
1252         anon_vma = (void *) anon_vma + PAGE_M    1170         anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1253         WRITE_ONCE(folio->mapping, (struct ad !! 1171         WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
1254         folio->index = linear_page_index(vma, !! 1172         page->index = linear_page_index(vma, address);
                                                   >> 1173 out:
                                                   >> 1174         if (exclusive)
                                                   >> 1175                 SetPageAnonExclusive(page);
1255 }                                                1176 }
1256                                                  1177 
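The PAGE_MAPPING_ANON arithmetic above is a tagged-pointer convention: the low bits of folio->mapping distinguish an anon_vma from a struct address_space. A simplified decoder, assuming only the PAGE_MAPPING_* flag bits from page-flags.h (the kernel's real accessor is folio_anon_vma(); this helper is purely illustrative):

static struct anon_vma *example_folio_anon_vma(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)READ_ONCE(folio->mapping);

	/* File-backed (or KSM) folios do not carry a bare anon_vma pointer. */
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	/* Strip the tag bit to recover the anon_vma set by __folio_set_anon(). */
	return (struct anon_vma *)(mapping & ~PAGE_MAPPING_FLAGS);
}
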
1257 /**                                              1178 /**
1258  * __page_check_anon_rmap - sanity check anon    1179  * __page_check_anon_rmap - sanity check anonymous rmap addition
1259  * @folio:      The folio containing @page.   !! 1180  * @page:       the page to add the mapping to
1260  * @page:       the page to check the mapping << 
1261  * @vma:        the vm area in which the mapp    1181  * @vma:        the vm area in which the mapping is added
1262  * @address:    the user virtual address mapp    1182  * @address:    the user virtual address mapped
1263  */                                              1183  */
1264 static void __page_check_anon_rmap(struct fol !! 1184 static void __page_check_anon_rmap(struct page *page,
1265         struct vm_area_struct *vma, unsigned     1185         struct vm_area_struct *vma, unsigned long address)
1266 {                                                1186 {
                                                   >> 1187         struct folio *folio = page_folio(page);
1267         /*                                       1188         /*
1268          * The page's anon-rmap details (mapp    1189          * The page's anon-rmap details (mapping and index) are guaranteed to
1269          * be set up correctly at this point.    1190          * be set up correctly at this point.
1270          *                                       1191          *
1271          * We have exclusion against folio_ad !! 1192          * We have exclusion against page_add_anon_rmap because the caller
1272          * always holds the page locked.         1193          * always holds the page locked.
1273          *                                       1194          *
1274          * We have exclusion against folio_ad !! 1195          * We have exclusion against page_add_new_anon_rmap because those pages
1275          * are initially only visible via the    1196          * are initially only visible via the pagetables, and the pte is locked
1276          * over the call to folio_add_new_ano !! 1197          * over the call to page_add_new_anon_rmap.
1277          */                                      1198          */
1278         VM_BUG_ON_FOLIO(folio_anon_vma(folio)    1199         VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
1279                         folio);                  1200                         folio);
1280         VM_BUG_ON_PAGE(page_to_pgoff(page) !=    1201         VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
1281                        page);                    1202                        page);
1282 }                                                1203 }
1283                                                  1204 
1284 static void __folio_mod_stat(struct folio *fo !! 1205 /**
                                                   >> 1206  * page_add_anon_rmap - add pte mapping to an anonymous page
                                                   >> 1207  * @page:       the page to add the mapping to
                                                   >> 1208  * @vma:        the vm area in which the mapping is added
                                                   >> 1209  * @address:    the user virtual address mapped
                                                   >> 1210  * @flags:      the rmap flags
                                                   >> 1211  *
                                                   >> 1212  * The caller needs to hold the pte lock, and the page must be locked in
                                                   >> 1213  * the anon_vma case: to serialize mapping,index checking after setting,
                                                   >> 1214  * and to ensure that PageAnon is not being upgraded racily to PageKsm
                                                   >> 1215  * (but PageKsm is never downgraded to PageAnon).
                                                   >> 1216  */
                                                   >> 1217 void page_add_anon_rmap(struct page *page,
                                                   >> 1218         struct vm_area_struct *vma, unsigned long address, rmap_t flags)
1285 {                                                1219 {
1286         int idx;                              !! 1220         atomic_t *mapped;
1287                                               !! 1221         int nr = 0, nr_pmdmapped = 0;
1288         if (nr) {                             !! 1222         bool compound = flags & RMAP_COMPOUND;
1289                 idx = folio_test_anon(folio)  !! 1223         bool first = true;
1290                 __lruvec_stat_mod_folio(folio !! 1224 
1291         }                                     !! 1225         if (unlikely(PageKsm(page)))
1292         if (nr_pmdmapped) {                   !! 1226                 lock_page_memcg(page);
1293                 if (folio_test_anon(folio)) { !! 1227 
1294                         idx = NR_ANON_THPS;   !! 1228         /* Is page being mapped by PTE? Is this its first map to be added? */
1295                         __lruvec_stat_mod_fol !! 1229         if (likely(!compound)) {
1296                 } else {                      !! 1230                 first = atomic_inc_and_test(&page->_mapcount);
1297                         /* NR_*_PMDMAPPED are !! 1231                 nr = first;
1298                         idx = folio_test_swap !! 1232                 if (first && PageCompound(page)) {
1299                                 NR_SHMEM_PMDM !! 1233                         mapped = subpages_mapcount_ptr(compound_head(page));
1300                         __mod_node_page_state !! 1234                         nr = atomic_inc_return_relaxed(mapped);
1301                                               !! 1235                         nr = (nr < COMPOUND_MAPPED);
1302                 }                                1236                 }
1303         }                                     !! 1237         } else if (PageTransHuge(page)) {
1304 }                                             !! 1238                 /* That test is redundant: it's for safety or to optimize out */
1305                                               << 
1306 static __always_inline void __folio_add_anon_ << 
1307                 struct page *page, int nr_pag << 
1308                 unsigned long address, rmap_t << 
1309 {                                             << 
1310         int i, nr, nr_pmdmapped = 0;          << 
1311                                               << 
1312         VM_WARN_ON_FOLIO(!folio_test_anon(fol << 
1313                                               << 
1314         nr = __folio_add_rmap(folio, page, nr << 
1315                                               << 
1316         if (likely(!folio_test_ksm(folio)))   << 
1317                 __page_check_anon_rmap(folio, << 
1318                                                  1239 
1319         __folio_mod_stat(folio, nr, nr_pmdmap !! 1240                 first = atomic_inc_and_test(compound_mapcount_ptr(page));
1320                                               !! 1241                 if (first) {
1321         if (flags & RMAP_EXCLUSIVE) {         !! 1242                         mapped = subpages_mapcount_ptr(page);
1322                 switch (level) {              !! 1243                         nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
1323                 case RMAP_LEVEL_PTE:          !! 1244                         if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
1324                         for (i = 0; i < nr_pa !! 1245                                 nr_pmdmapped = thp_nr_pages(page);
1325                                 SetPageAnonEx !! 1246                                 nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
1326                         break;                !! 1247                                 /* Raced ahead of a remove and another add? */
1327                 case RMAP_LEVEL_PMD:          !! 1248                                 if (unlikely(nr < 0))
1328                         SetPageAnonExclusive( !! 1249                                         nr = 0;
1329                         break;                !! 1250                         } else {
                                                   >> 1251                                 /* Raced ahead of a remove of COMPOUND_MAPPED */
                                                   >> 1252                                 nr = 0;
                                                   >> 1253                         }
1330                 }                                1254                 }
1331         }                                        1255         }
1332         for (i = 0; i < nr_pages; i++) {      << 
1333                 struct page *cur_page = page  << 
1334                                                  1256 
1335                 /* While PTE-mapping a THP we !! 1257         VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
1336                 VM_WARN_ON_FOLIO((atomic_read !! 1258         VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
1337                                   (folio_test << 
1338                                    folio_enti << 
1339                                  PageAnonExcl << 
1340         }                                     << 
1341                                                  1259 
1342         /*                                    !! 1260         if (nr_pmdmapped)
1343          * For large folio, only mlock it if  !! 1261                 __mod_lruvec_page_state(page, NR_ANON_THPS, nr_pmdmapped);
1344          * not easy to check whether the larg !! 1262         if (nr)
1345          * here. Only mlock normal 4K folio a !! 1263                 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
1346          * large folio.                       !! 1264 
1347          */                                   !! 1265         if (unlikely(PageKsm(page)))
1348         if (!folio_test_large(folio))         !! 1266                 unlock_page_memcg(page);
1349                 mlock_vma_folio(folio, vma);  !! 1267 
1350 }                                             !! 1268         /* address might be in next vma when migration races vma_adjust */
1351                                               !! 1269         else if (first)
1352 /**                                           !! 1270                 __page_set_anon_rmap(page, vma, address,
1353  * folio_add_anon_rmap_ptes - add PTE mapping !! 1271                                      !!(flags & RMAP_EXCLUSIVE));
1354  * @folio:      The folio to add the mappings !! 1272         else
1355  * @page:       The first page to add         !! 1273                 __page_check_anon_rmap(page, vma, address);
1356  * @nr_pages:   The number of pages which wil << 
1357  * @vma:        The vm area in which the mapp << 
1358  * @address:    The user virtual address of t << 
1359  * @flags:      The rmap flags                << 
1360  *                                            << 
1361  * The page range of folio is defined by [fir << 
1362  *                                            << 
1363  * The caller needs to hold the page table lo << 
1364  * the anon_vma case: to serialize mapping,in << 
1365  * and to ensure that an anon folio is not be << 
1366  * (but KSM folios are never downgraded).     << 
1367  */                                           << 
1368 void folio_add_anon_rmap_ptes(struct folio *f << 
1369                 int nr_pages, struct vm_area_ << 
1370                 rmap_t flags)                 << 
1371 {                                             << 
1372         __folio_add_anon_rmap(folio, page, nr << 
1373                               RMAP_LEVEL_PTE) << 
1374 }                                             << 
1375                                                  1274 
1376 /**                                           !! 1275         mlock_vma_page(page, vma, compound);
1377  * folio_add_anon_rmap_pmd - add a PMD mappin << 
1378  * @folio:      The folio to add the mapping  << 
1379  * @page:       The first page to add         << 
1380  * @vma:        The vm area in which the mapp << 
1381  * @address:    The user virtual address of t << 
1382  * @flags:      The rmap flags                << 
1383  *                                            << 
1384  * The page range of folio is defined by [fir << 
1385  *                                            << 
1386  * The caller needs to hold the page table lo << 
1387  * the anon_vma case: to serialize mapping,in << 
1388  */                                           << 
1389 void folio_add_anon_rmap_pmd(struct folio *fo << 
1390                 struct vm_area_struct *vma, u << 
1391 {                                             << 
1392 #ifdef CONFIG_TRANSPARENT_HUGEPAGE            << 
1393         __folio_add_anon_rmap(folio, page, HP << 
1394                               RMAP_LEVEL_PMD) << 
1395 #else                                         << 
1396         WARN_ON_ONCE(true);                   << 
1397 #endif                                        << 
1398 }                                                1276 }
1399                                                  1277 
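A hedged sketch of how callers choose between the PTE and PMD variants (the surrounding fault/remap code is paraphrased, not quoted; anon_rmap_example and haddr are hypothetical, with haddr standing for a PMD-aligned user address):

static void anon_rmap_example(struct folio *folio, struct page *page,
			      struct vm_area_struct *vma,
			      unsigned long address, unsigned long haddr)
{
	/* One PTE of an existing anon folio, exclusively owned by this mm: */
	folio_add_anon_rmap_ptes(folio, page, 1, vma, address, RMAP_EXCLUSIVE);

	/* Or: the whole folio mapped by a single PMD, possibly shared after fork: */
	folio_add_anon_rmap_pmd(folio, &folio->page, vma, haddr, RMAP_NONE);
}
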
1400 /**                                              1278 /**
1401  * folio_add_new_anon_rmap - Add mapping to a !! 1279  * page_add_new_anon_rmap - add mapping to a new anonymous page
1402  * @folio:      The folio to add the mapping  !! 1280  * @page:       the page to add the mapping to
1403  * @vma:        the vm area in which the mapp    1281  * @vma:        the vm area in which the mapping is added
1404  * @address:    the user virtual address mapp    1282  * @address:    the user virtual address mapped
1405  * @flags:      The rmap flags                << 
1406  *                                               1283  *
1407  * Like folio_add_anon_rmap_*() but must only !! 1284  * If it's a compound page, it is accounted as a compound page. As the page
1408  * This means the inc-and-test can be bypasse !! 1285  * is new, it's assumed to get mapped exclusively by a single process.
1409  * The folio doesn't necessarily need to be l << 
1410  * unless two threads map it concurrently. Ho << 
1411  * locked if it's shared.                     << 
1412  *                                               1286  *
1413  * If the folio is pmd-mappable, it is accoun !! 1287  * Same as page_add_anon_rmap but must only be called on *new* pages.
                                                   >> 1288  * This means the inc-and-test can be bypassed.
                                                   >> 1289  * Page does not have to be locked.
1414  */                                              1290  */
1415 void folio_add_new_anon_rmap(struct folio *fo !! 1291 void page_add_new_anon_rmap(struct page *page,
1416                 unsigned long address, rmap_t !! 1292         struct vm_area_struct *vma, unsigned long address)
1417 {                                                1293 {
1418         const int nr = folio_nr_pages(folio); !! 1294         int nr;
1419         const bool exclusive = flags & RMAP_E << 
1420         int nr_pmdmapped = 0;                 << 
1421                                               << 
1422         VM_WARN_ON_FOLIO(folio_test_hugetlb(f << 
1423         VM_WARN_ON_FOLIO(!exclusive && !folio << 
1424         VM_BUG_ON_VMA(address < vma->vm_start << 
1425                         address + (nr << PAGE << 
1426                                               << 
1427         /*                                    << 
1428          * VM_DROPPABLE mappings don't swap;  << 
1429          * under memory pressure.             << 
1430          */                                   << 
1431         if (!folio_test_swapbacked(folio) &&  << 
1432                 __folio_set_swapbacked(folio) << 
1433         __folio_set_anon(folio, vma, address, << 
1434                                                  1295 
1435         if (likely(!folio_test_large(folio))) !! 1296         VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
1436                 /* increment count (starts at !! 1297         __SetPageSwapBacked(page);
1437                 atomic_set(&folio->_mapcount, << 
1438                 if (exclusive)                << 
1439                         SetPageAnonExclusive( << 
1440         } else if (!folio_test_pmd_mappable(f << 
1441                 int i;                        << 
1442                                               << 
1443                 for (i = 0; i < nr; i++) {    << 
1444                         struct page *page = f << 
1445                                               << 
1446                         /* increment count (s << 
1447                         atomic_set(&page->_ma << 
1448                         if (exclusive)        << 
1449                                 SetPageAnonEx << 
1450                 }                             << 
1451                                                  1298 
                                                   >> 1299         if (likely(!PageCompound(page))) {
1452                 /* increment count (starts at    1300                 /* increment count (starts at -1) */
1453                 atomic_set(&folio->_large_map !! 1301                 atomic_set(&page->_mapcount, 0);
1454                 atomic_set(&folio->_nr_pages_ !! 1302                 nr = 1;
1455         } else {                                 1303         } else {
                                                   >> 1304                 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1456                 /* increment count (starts at    1305                 /* increment count (starts at -1) */
1457                 atomic_set(&folio->_entire_ma !! 1306                 atomic_set(compound_mapcount_ptr(page), 0);
1458                 /* increment count (starts at !! 1307                 atomic_set(subpages_mapcount_ptr(page), COMPOUND_MAPPED);
1459                 atomic_set(&folio->_large_map !! 1308                 nr = thp_nr_pages(page);
1460                 atomic_set(&folio->_nr_pages_ !! 1309                 __mod_lruvec_page_state(page, NR_ANON_THPS, nr);
1461                 if (exclusive)                << 
1462                         SetPageAnonExclusive( << 
1463                 nr_pmdmapped = nr;            << 
1464         }                                        1310         }
1465                                                  1311 
1466         __folio_mod_stat(folio, nr, nr_pmdmap !! 1312         __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
1467         mod_mthp_stat(folio_order(folio), MTH !! 1313         __page_set_anon_rmap(page, vma, address, 1);
1468 }                                                1314 }
1469                                                  1315 
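Roughly how an anonymous-fault path is expected to use this, in the spirit of do_anonymous_page() but condensed (the helper name is hypothetical; entry is assumed to have been built with mk_pte() and friends by the caller):

static void new_anon_fault_example(struct vm_fault *vmf, struct folio *folio,
				   pte_t entry)
{
	struct vm_area_struct *vma = vmf->vma;

	/* The folio is brand new, so no inc-and-test of an existing mapcount. */
	folio_add_new_anon_rmap(folio, vma, vmf->address, RMAP_EXCLUSIVE);
	folio_add_lru_vma(folio, vma);
	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
}
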
1470 static __always_inline void __folio_add_file_ !! 1316 /**
1471                 struct page *page, int nr_pag !! 1317  * page_add_file_rmap - add pte mapping to a file page
1472                 enum rmap_level level)        !! 1318  * @page:       the page to add the mapping to
                                                   >> 1319  * @vma:        the vm area in which the mapping is added
                                                   >> 1320  * @compound:   charge the page as compound or small page
                                                   >> 1321  *
                                                   >> 1322  * The caller needs to hold the pte lock.
                                                   >> 1323  */
                                                   >> 1324 void page_add_file_rmap(struct page *page,
                                                   >> 1325         struct vm_area_struct *vma, bool compound)
1473 {                                                1326 {
1474         int nr, nr_pmdmapped = 0;             !! 1327         atomic_t *mapped;
                                                   >> 1328         int nr = 0, nr_pmdmapped = 0;
                                                   >> 1329         bool first;
1475                                                  1330 
1476         VM_WARN_ON_FOLIO(folio_test_anon(foli !! 1331         VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
                                                   >> 1332         lock_page_memcg(page);
1477                                                  1333 
1478         nr = __folio_add_rmap(folio, page, nr !! 1334         /* Is page being mapped by PTE? Is this its first map to be added? */
1479         __folio_mod_stat(folio, nr, nr_pmdmap !! 1335         if (likely(!compound)) {
                                                   >> 1336                 first = atomic_inc_and_test(&page->_mapcount);
                                                   >> 1337                 nr = first;
                                                   >> 1338                 if (first && PageCompound(page)) {
                                                   >> 1339                         mapped = subpages_mapcount_ptr(compound_head(page));
                                                   >> 1340                         nr = atomic_inc_return_relaxed(mapped);
                                                   >> 1341                         nr = (nr < COMPOUND_MAPPED);
                                                   >> 1342                 }
                                                   >> 1343         } else if (PageTransHuge(page)) {
                                                   >> 1344                 /* That test is redundant: it's for safety or to optimize out */
1480                                                  1345 
1481         /* See comments in folio_add_anon_rma !! 1346                 first = atomic_inc_and_test(compound_mapcount_ptr(page));
1482         if (!folio_test_large(folio))         !! 1347                 if (first) {
1483                 mlock_vma_folio(folio, vma);  !! 1348                         mapped = subpages_mapcount_ptr(page);
1484 }                                             !! 1349                         nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
                                                   >> 1350                         if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
                                                   >> 1351                                 nr_pmdmapped = thp_nr_pages(page);
                                                   >> 1352                                 nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
                                                   >> 1353                                 /* Raced ahead of a remove and another add? */
                                                   >> 1354                                 if (unlikely(nr < 0))
                                                   >> 1355                                         nr = 0;
                                                   >> 1356                         } else {
                                                   >> 1357                                 /* Raced ahead of a remove of COMPOUND_MAPPED */
                                                   >> 1358                                 nr = 0;
                                                   >> 1359                         }
                                                   >> 1360                 }
                                                   >> 1361         }
1485                                                  1362 
1486 /**                                           !! 1363         if (nr_pmdmapped)
1487  * folio_add_file_rmap_ptes - add PTE mapping !! 1364                 __mod_lruvec_page_state(page, PageSwapBacked(page) ?
1488  * @folio:      The folio to add the mappings !! 1365                         NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
1489  * @page:       The first page to add         !! 1366         if (nr)
1490  * @nr_pages:   The number of pages that will !! 1367                 __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
1491  * @vma:        The vm area in which the mapp !! 1368         unlock_page_memcg(page);
1492  *                                            !! 1369 
1493  * The page range of the folio is defined by  !! 1370         mlock_vma_page(page, vma, compound);
1494  *                                            << 
1495  * The caller needs to hold the page table lo << 
1496  */                                           << 
1497 void folio_add_file_rmap_ptes(struct folio *f << 
1498                 int nr_pages, struct vm_area_ << 
1499 {                                             << 
1500         __folio_add_file_rmap(folio, page, nr << 
1501 }                                                1371 }
1502                                                  1372 
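A sketch of the batched PTE case: a file-fault path that maps several consecutive pages of one folio under a single page-table lock can account them all with one call (names are illustrative; the PMD variant that follows is the analogous call for a pmd-mapped folio):

static void file_fault_example(struct folio *folio, struct page *page, int nr,
			       struct vm_area_struct *vma)
{
	/*
	 * All nr pages belong to this folio and are being mapped by
	 * consecutive PTEs; one call replaces nr separate rmap updates.
	 */
	folio_add_file_rmap_ptes(folio, page, nr, vma);
}
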
1503 /**                                              1373 /**
1504  * folio_add_file_rmap_pmd - add a PMD mappin !! 1374  * page_remove_rmap - take down pte mapping from a page
1505  * @folio:      The folio to add the mapping  !! 1375  * @page:       page to remove mapping from
1506  * @page:       The first page to add         !! 1376  * @vma:        the vm area from which the mapping is removed
1507  * @vma:        The vm area in which the mapp !! 1377  * @compound:   uncharge the page as compound or small page
1508  *                                            !! 1378  *
1509  * The page range of the folio is defined by  !! 1379  * The caller needs to hold the pte lock.
1510  *                                            !! 1380  */
1511  * The caller needs to hold the page table lo !! 1381 void page_remove_rmap(struct page *page,
1512  */                                           !! 1382         struct vm_area_struct *vma, bool compound)
1513 void folio_add_file_rmap_pmd(struct folio *fo !! 1383 {
1514                 struct vm_area_struct *vma)   !! 1384         atomic_t *mapped;
1515 {                                             !! 1385         int nr = 0, nr_pmdmapped = 0;
1516 #ifdef CONFIG_TRANSPARENT_HUGEPAGE            !! 1386         bool last;
1517         __folio_add_file_rmap(folio, page, HP !! 1387 
1518 #else                                         !! 1388         VM_BUG_ON_PAGE(compound && !PageHead(page), page);
1519         WARN_ON_ONCE(true);                   !! 1389 
1520 #endif                                        !! 1390         /* Hugetlb pages are not counted in NR_*MAPPED */
1521 }                                             !! 1391         if (unlikely(PageHuge(page))) {
                                                   >> 1392                 /* hugetlb pages are always mapped with pmds */
                                                   >> 1393                 atomic_dec(compound_mapcount_ptr(page));
                                                   >> 1394                 return;
                                                   >> 1395         }
1522                                                  1396 
1523 static __always_inline void __folio_remove_rm !! 1397         lock_page_memcg(page);
1524                 struct page *page, int nr_pag !! 1398 
1525                 enum rmap_level level)        !! 1399         /* Is page being unmapped by PTE? Is this its last map to be removed? */
1526 {                                             !! 1400         if (likely(!compound)) {
1527         atomic_t *mapped = &folio->_nr_pages_ !! 1401                 last = atomic_add_negative(-1, &page->_mapcount);
1528         int last = 0, nr = 0, nr_pmdmapped =  !! 1402                 nr = last;
1529         bool partially_mapped = false;        !! 1403                 if (last && PageCompound(page)) {
1530                                               !! 1404                         mapped = subpages_mapcount_ptr(compound_head(page));
1531         __folio_rmap_sanity_checks(folio, pag !! 1405                         nr = atomic_dec_return_relaxed(mapped);
1532                                               !! 1406                         nr = (nr < COMPOUND_MAPPED);
1533         switch (level) {                      << 
1534         case RMAP_LEVEL_PTE:                  << 
1535                 if (!folio_test_large(folio)) << 
1536                         nr = atomic_add_negat << 
1537                         break;                << 
1538                 }                                1407                 }
                                                   >> 1408         } else if (PageTransHuge(page)) {
                                                   >> 1409                 /* That test is redundant: it's for safety or to optimize out */
1539                                                  1410 
1540                 atomic_sub(nr_pages, &folio-> !! 1411                 last = atomic_add_negative(-1, compound_mapcount_ptr(page));
1541                 do {                          << 
1542                         last += atomic_add_ne << 
1543                 } while (page++, --nr_pages > << 
1544                                               << 
1545                 if (last &&                   << 
1546                     atomic_sub_return_relaxed << 
1547                         nr = last;            << 
1548                                               << 
1549                 partially_mapped = nr && atom << 
1550                 break;                        << 
1551         case RMAP_LEVEL_PMD:                  << 
1552                 atomic_dec(&folio->_large_map << 
1553                 last = atomic_add_negative(-1 << 
1554                 if (last) {                      1412                 if (last) {
1555                         nr = atomic_sub_retur !! 1413                         mapped = subpages_mapcount_ptr(page);
1556                         if (likely(nr < ENTIR !! 1414                         nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped);
1557                                 nr_pmdmapped  !! 1415                         if (likely(nr < COMPOUND_MAPPED)) {
1558                                 nr = nr_pmdma !! 1416                                 nr_pmdmapped = thp_nr_pages(page);
                                                   >> 1417                                 nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
1559                                 /* Raced ahea    1418                                 /* Raced ahead of another remove and an add? */
1560                                 if (unlikely(    1419                                 if (unlikely(nr < 0))
1561                                         nr =     1420                                         nr = 0;
1562                         } else {                 1421                         } else {
1563                                 /* An add of  !! 1422                                 /* An add of COMPOUND_MAPPED raced ahead */
1564                                 nr = 0;          1423                                 nr = 0;
1565                         }                        1424                         }
1566                 }                                1425                 }
1567                                               << 
1568                 partially_mapped = nr && nr < << 
1569                 break;                        << 
1570         }                                        1426         }
1571                                                  1427 
1572         /*                                    !! 1428         if (nr_pmdmapped) {
1573          * Queue anon large folio for deferre !! 1429                 __mod_lruvec_page_state(page, PageAnon(page) ? NR_ANON_THPS :
1574          * the folio is unmapped and at least !! 1430                                 (PageSwapBacked(page) ? NR_SHMEM_PMDMAPPED :
1575          *                                    !! 1431                                 NR_FILE_PMDMAPPED), -nr_pmdmapped);
1576          * Check partially_mapped first to en !! 1432         }
1577          */                                   !! 1433         if (nr) {
1578         if (partially_mapped && folio_test_an !! 1434                 __mod_lruvec_page_state(page, PageAnon(page) ? NR_ANON_MAPPED :
1579             !folio_test_partially_mapped(foli !! 1435                                 NR_FILE_MAPPED, -nr);
1580                 deferred_split_folio(folio, t !! 1436                 /*
1581                                               !! 1437                  * Queue anon THP for deferred split if at least one small
1582         __folio_mod_stat(folio, -nr, -nr_pmdm !! 1438                  * page of the compound page is unmapped, but at least one
                                                   >> 1439                  * small page is still mapped.
                                                   >> 1440                  */
                                                   >> 1441                 if (PageTransCompound(page) && PageAnon(page))
                                                   >> 1442                         if (!compound || nr < nr_pmdmapped)
                                                   >> 1443                                 deferred_split_huge_page(compound_head(page));
                                                   >> 1444         }
1583                                                  1445 
1584         /*                                       1446         /*
1585          * It would be tidy to reset folio_te !! 1447          * It would be tidy to reset PageAnon mapping when fully unmapped,
1586          * unmapped, but that might overwrite !! 1448          * but that might overwrite a racing page_add_anon_rmap
1587          * which increments mapcount after us !! 1449          * which increments mapcount after us but sets mapping
1588          * so leave the reset to free_pages_p !! 1450          * before us: so leave the reset to free_pages_prepare,
1589          * it's only reliable while mapped.   !! 1451          * and remember that it's only reliable while mapped.
1590          */                                      1452          */
1591                                                  1453 
1592         munlock_vma_folio(folio, vma);        !! 1454         unlock_page_memcg(page);
1593 }                                             << 
1594                                               << 
1595 /**                                           << 
1596  * folio_remove_rmap_ptes - remove PTE mappin << 
1597  * @folio:      The folio to remove the mappi << 
1598  * @page:       The first page to remove      << 
1599  * @nr_pages:   The number of pages that will << 
1600  * @vma:        The vm area from which the ma << 
1601  *                                            << 
1602  * The page range of the folio is defined by  << 
1603  *                                            << 
1604  * The caller needs to hold the page table lo << 
1605  */                                           << 
1606 void folio_remove_rmap_ptes(struct folio *fol << 
1607                 int nr_pages, struct vm_area_ << 
1608 {                                             << 
1609         __folio_remove_rmap(folio, page, nr_p << 
1610 }                                             << 
1611                                                  1455 
1612 /**                                           !! 1456         munlock_vma_page(page, vma, compound);
1613  * folio_remove_rmap_pmd - remove a PMD mappi << 
1614  * @folio:      The folio to remove the mappi << 
1615  * @page:       The first page to remove      << 
1616  * @vma:        The vm area from which the ma << 
1617  *                                            << 
1618  * The page range of the folio is defined by  << 
1619  *                                            << 
1620  * The caller needs to hold the page table lo << 
1621  */                                           << 
1622 void folio_remove_rmap_pmd(struct folio *foli << 
1623                 struct vm_area_struct *vma)   << 
1624 {                                             << 
1625 #ifdef CONFIG_TRANSPARENT_HUGEPAGE            << 
1626         __folio_remove_rmap(folio, page, HPAG << 
1627 #else                                         << 
1628         WARN_ON_ONCE(true);                   << 
1629 #endif                                        << 
1630 }                                                1457 }
1631                                                  1458 
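A hedged sketch of the teardown side, as a zap path might call it after clearing the relevant page-table entries (illustrative only; the real callers live in mm/memory.c and mm/huge_memory.c):

static void zap_rmap_example(struct folio *folio, struct page *page, int nr,
			     struct vm_area_struct *vma, bool pmd_mapped)
{
	if (pmd_mapped)
		/* The folio was mapped by one PMD that has just been cleared. */
		folio_remove_rmap_pmd(folio, page, vma);
	else
		/* nr consecutive PTEs of this folio have just been cleared. */
		folio_remove_rmap_ptes(folio, page, nr, vma);
}
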
1632 /*                                               1459 /*
1633  * @arg: enum ttu_flags will be passed to thi    1460  * @arg: enum ttu_flags will be passed to this argument
1634  */                                              1461  */
1635 static bool try_to_unmap_one(struct folio *fo    1462 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
1636                      unsigned long address, v    1463                      unsigned long address, void *arg)
1637 {                                                1464 {
1638         struct mm_struct *mm = vma->vm_mm;       1465         struct mm_struct *mm = vma->vm_mm;
1639         DEFINE_FOLIO_VMA_WALK(pvmw, folio, vm    1466         DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1640         pte_t pteval;                            1467         pte_t pteval;
1641         struct page *subpage;                    1468         struct page *subpage;
1642         bool anon_exclusive, ret = true;         1469         bool anon_exclusive, ret = true;
1643         struct mmu_notifier_range range;         1470         struct mmu_notifier_range range;
1644         enum ttu_flags flags = (enum ttu_flag    1471         enum ttu_flags flags = (enum ttu_flags)(long)arg;
1645         unsigned long pfn;                    << 
1646         unsigned long hsz = 0;                << 
1647                                                  1472 
1648         /*                                       1473         /*
1649          * When racing against e.g. zap_pte_r    1474          * When racing against e.g. zap_pte_range() on another cpu,
1650          * in between its ptep_get_and_clear_ !! 1475          * in between its ptep_get_and_clear_full() and page_remove_rmap(),
1651          * try_to_unmap() may return before p    1476          * try_to_unmap() may return before page_mapped() has become false,
1652          * if page table locking is skipped:     1477          * if page table locking is skipped: use TTU_SYNC to wait for that.
1653          */                                      1478          */
1654         if (flags & TTU_SYNC)                    1479         if (flags & TTU_SYNC)
1655                 pvmw.flags = PVMW_SYNC;          1480                 pvmw.flags = PVMW_SYNC;
1656                                                  1481 
                                                   >> 1482         if (flags & TTU_SPLIT_HUGE_PMD)
                                                   >> 1483                 split_huge_pmd_address(vma, address, false, folio);
                                                   >> 1484 
1657         /*                                       1485         /*
1658          * For THP, we have to assume the wor    1486          * For THP, we have to assume the worst case, i.e. pmd for invalidation.
1659          * For hugetlb, it could be much wors    1487          * For hugetlb, it could be much worse if we need to do pud
1660          * invalidation in the case of pmd sh    1488          * invalidation in the case of pmd sharing.
1661          *                                       1489          *
1662          * Note that the folio can not be fre    1490          * Note that the folio can not be freed in this function as call of
1663          * try_to_unmap() must hold a referen    1491          * try_to_unmap() must hold a reference on the folio.
1664          */                                      1492          */
1665         range.end = vma_address_end(&pvmw);      1493         range.end = vma_address_end(&pvmw);
1666         mmu_notifier_range_init(&range, MMU_N !! 1494         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1667                                 address, rang    1495                                 address, range.end);
1668         if (folio_test_hugetlb(folio)) {         1496         if (folio_test_hugetlb(folio)) {
1669                 /*                               1497                 /*
1670                  * If sharing is possible, st    1498                  * If sharing is possible, start and end will be adjusted
1671                  * accordingly.                  1499                  * accordingly.
1672                  */                              1500                  */
1673                 adjust_range_if_pmd_sharing_p    1501                 adjust_range_if_pmd_sharing_possible(vma, &range.start,
1674                                                  1502                                                      &range.end);
1675                                               << 
1676                 /* We need the huge page size << 
1677                 hsz = huge_page_size(hstate_v << 
1678         }                                        1503         }
1679         mmu_notifier_invalidate_range_start(&    1504         mmu_notifier_invalidate_range_start(&range);
1680                                                  1505 
1681         while (page_vma_mapped_walk(&pvmw)) {    1506         while (page_vma_mapped_walk(&pvmw)) {
                                                   >> 1507                 /* Unexpected PMD-mapped THP? */
                                                   >> 1508                 VM_BUG_ON_FOLIO(!pvmw.pte, folio);
                                                   >> 1509 
1682                 /*                               1510                 /*
1683                  * If the folio is in an mloc    1511                  * If the folio is in an mlock()d vma, we must not swap it out.
1684                  */                              1512                  */
1685                 if (!(flags & TTU_IGNORE_MLOC    1513                 if (!(flags & TTU_IGNORE_MLOCK) &&
1686                     (vma->vm_flags & VM_LOCKE    1514                     (vma->vm_flags & VM_LOCKED)) {
1687                         /* Restore the mlock     1515                         /* Restore the mlock which got missed */
1688                         if (!folio_test_large !! 1516                         mlock_vma_folio(folio, vma, false);
1689                                 mlock_vma_fol !! 1517                         page_vma_mapped_walk_done(&pvmw);
1690                         goto walk_abort;      !! 1518                         ret = false;
1691                 }                             !! 1519                         break;
1692                                               << 
1693                 if (!pvmw.pte) {              << 
1694                         if (unmap_huge_pmd_lo << 
1695                                               << 
1696                                 goto walk_don << 
1697                                               << 
1698                         if (flags & TTU_SPLIT << 
1699                                 /*            << 
1700                                  * We tempora << 
1701                                  * restart so << 
1702                                  */           << 
1703                                 split_huge_pm << 
1704                                               << 
1705                                 flags &= ~TTU << 
1706                                 page_vma_mapp << 
1707                                 continue;     << 
1708                         }                     << 
1709                 }                                1520                 }
1710                                                  1521 
1711                 /* Unexpected PMD-mapped THP? !! 1522                 subpage = folio_page(folio,
1712                 VM_BUG_ON_FOLIO(!pvmw.pte, fo !! 1523                                         pte_pfn(*pvmw.pte) - folio_pfn(folio));
1713                                               << 
1714                 pfn = pte_pfn(ptep_get(pvmw.p << 
1715                 subpage = folio_page(folio, p << 
1716                 address = pvmw.address;          1524                 address = pvmw.address;
1717                 anon_exclusive = folio_test_a    1525                 anon_exclusive = folio_test_anon(folio) &&
1718                                  PageAnonExcl    1526                                  PageAnonExclusive(subpage);
1719                                                  1527 
1720                 if (folio_test_hugetlb(folio)    1528                 if (folio_test_hugetlb(folio)) {
1721                         bool anon = folio_tes    1529                         bool anon = folio_test_anon(folio);
1722                                                  1530 
1723                         /*                       1531                         /*
1724                          * The try_to_unmap()    1532                          * The try_to_unmap() is only passed a hugetlb page
1725                          * in the case where     1533                          * in the case where the hugetlb page is poisoned.
1726                          */                      1534                          */
1727                         VM_BUG_ON_PAGE(!PageH    1535                         VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage);
1728                         /*                       1536                         /*
1729                          * huge_pmd_unshare m    1537                          * huge_pmd_unshare may unmap an entire PMD page.
1730                          * There is no way of    1538                          * There is no way of knowing exactly which PMDs may
1731                          * be cached for this    1539                          * be cached for this mm, so we must flush them all.
1732                          * start/end were alr    1540                          * start/end were already adjusted above to cover this
1733                          * range.                1541                          * range.
1734                          */                      1542                          */
1735                         flush_cache_range(vma    1543                         flush_cache_range(vma, range.start, range.end);
1736                                                  1544 
1737                         /*                       1545                         /*
1738                          * To call huge_pmd_u    1546                          * To call huge_pmd_unshare, i_mmap_rwsem must be
1739                          * held in write mode    1547                          * held in write mode.  Caller needs to explicitly
1740                          * do this outside rm    1548                          * do this outside rmap routines.
1741                          *                       1549                          *
1742                          * We also must hold     1550                          * We also must hold hugetlb vma_lock in write mode.
1743                          * Lock order dictate    1551                          * Lock order dictates acquiring vma_lock BEFORE
1744                          * i_mmap_rwsem.  We     1552                          * i_mmap_rwsem.  We can only try lock here and fail
1745                          * if unsuccessful.      1553                          * if unsuccessful.
1746                          */                      1554                          */
1747                         if (!anon) {             1555                         if (!anon) {
1748                                 VM_BUG_ON(!(f    1556                                 VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
1749                                 if (!hugetlb_ !! 1557                                 if (!hugetlb_vma_trylock_write(vma)) {
1750                                         goto  !! 1558                                         page_vma_mapped_walk_done(&pvmw);
                                                   >> 1559                                         ret = false;
                                                   >> 1560                                         break;
                                                   >> 1561                                 }
1751                                 if (huge_pmd_    1562                                 if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
1752                                         huget    1563                                         hugetlb_vma_unlock_write(vma);
1753                                         flush    1564                                         flush_tlb_range(vma,
1754                                                  1565                                                 range.start, range.end);
                                                   >> 1566                                         mmu_notifier_invalidate_range(mm,
                                                   >> 1567                                                 range.start, range.end);
1755                                         /*       1568                                         /*
1756                                          * Th    1569                                          * The ref count of the PMD page was
1757                                          * dr    1570                                          * dropped which is part of the way map
1758                                          * co    1571                                          * counting is done for shared PMDs.
1759                                          * Re    1572                                          * Return 'true' here.  When there is
1760                                          * no    1573                                          * no other sharing, huge_pmd_unshare
1761                                          * re    1574                                          * returns false and we will unmap the
1762                                          * ac    1575                                          * actual page and drop map count
1763                                          * to    1576                                          * to zero.
1764                                          */      1577                                          */
1765                                         goto  !! 1578                                         page_vma_mapped_walk_done(&pvmw);
                                                   >> 1579                                         break;
1766                                 }                1580                                 }
1767                                 hugetlb_vma_u    1581                                 hugetlb_vma_unlock_write(vma);
1768                         }                        1582                         }
1769                         pteval = huge_ptep_cl    1583                         pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
1770                 } else {                         1584                 } else {
1771                         flush_cache_page(vma, !! 1585                         flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
1772                         /* Nuke the page tabl    1586                         /* Nuke the page table entry. */
1773                         if (should_defer_flus    1587                         if (should_defer_flush(mm, flags)) {
1774                                 /*               1588                                 /*
1775                                  * We clear t    1589                                  * We clear the PTE but do not flush so potentially
1776                                  * a remote C    1590                                  * a remote CPU could still be writing to the folio.
1777                                  * If the ent    1591                                  * If the entry was previously clean then the
1778                                  * architectu    1592                                  * architecture must guarantee that a clear->dirty
1779                                  * transition    1593                                  * transition on a cached TLB entry is written through
1780                                  * and traps     1594                                  * and traps if the PTE is unmapped.
1781                                  */              1595                                  */
1782                                 pteval = ptep    1596                                 pteval = ptep_get_and_clear(mm, address, pvmw.pte);
1783                                                  1597 
1784                                 set_tlb_ubc_f !! 1598                                 set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
1785                         } else {                 1599                         } else {
1786                                 pteval = ptep    1600                                 pteval = ptep_clear_flush(vma, address, pvmw.pte);
1787                         }                        1601                         }
1788                 }                                1602                 }
1789                                                  1603 
1790                 /*                               1604                 /*
1791                  * Now the pte is cleared. If    1605                  * Now the pte is cleared. If this pte was uffd-wp armed,
1792                  * we may want to replace a n    1606                  * we may want to replace a none pte with a marker pte if
1793                  * it's file-backed, so we do    1607                  * it's file-backed, so we don't lose the tracking info.
1794                  */                              1608                  */
1795                 pte_install_uffd_wp_if_needed    1609                 pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
1796                                                  1610 
1797                 /* Set the dirty flag on the     1611                 /* Set the dirty flag on the folio now the pte is gone. */
1798                 if (pte_dirty(pteval))           1612                 if (pte_dirty(pteval))
1799                         folio_mark_dirty(foli    1613                         folio_mark_dirty(folio);
1800                                                  1614 
1801                 /* Update high watermark befo    1615                 /* Update high watermark before we lower rss */
1802                 update_hiwater_rss(mm);          1616                 update_hiwater_rss(mm);
1803                                                  1617 
1804                 if (PageHWPoison(subpage) &&     1618                 if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) {
1805                         pteval = swp_entry_to    1619                         pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
1806                         if (folio_test_hugetl    1620                         if (folio_test_hugetlb(folio)) {
1807                                 hugetlb_count    1621                                 hugetlb_count_sub(folio_nr_pages(folio), mm);
1808                                 set_huge_pte_ !! 1622                                 set_huge_pte_at(mm, address, pvmw.pte, pteval);
1809                                               << 
1810                         } else {                 1623                         } else {
1811                                 dec_mm_counte !! 1624                                 dec_mm_counter(mm, mm_counter(&folio->page));
1812                                 set_pte_at(mm    1625                                 set_pte_at(mm, address, pvmw.pte, pteval);
1813                         }                        1626                         }
1814                                                  1627 
1815                 } else if (pte_unused(pteval)    1628                 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
1816                         /*                       1629                         /*
1817                          * The guest indicate    1630                          * The guest indicated that the page content is of no
1818                          * interest anymore.     1631                          * interest anymore. Simply discard the pte, vmscan
1819                          * will take care of     1632                          * will take care of the rest.
1820                          * A future reference    1633                          * A future reference will then fault in a new zero
1821                          * page. When userfau    1634                          * page. When userfaultfd is active, we must not drop
1822                          * this page though,     1635                          * this page though, as its main user (postcopy
1823                          * migration) will no    1636                          * migration) will not expect userfaults on already
1824                          * copied pages.         1637                          * copied pages.
1825                          */                      1638                          */
1826                         dec_mm_counter(mm, mm !! 1639                         dec_mm_counter(mm, mm_counter(&folio->page));
                                                   >> 1640                         /* We have to invalidate as we cleared the pte */
                                                   >> 1641                         mmu_notifier_invalidate_range(mm, address,
                                                   >> 1642                                                       address + PAGE_SIZE);
1827                 } else if (folio_test_anon(fo    1643                 } else if (folio_test_anon(folio)) {
1828                         swp_entry_t entry = p !! 1644                         swp_entry_t entry = { .val = page_private(subpage) };
1829                         pte_t swp_pte;           1645                         pte_t swp_pte;
1830                         /*                       1646                         /*
1831                          * Store the swap loc    1647                          * Store the swap location in the pte.
1832                          * See handle_pte_fau    1648                          * See handle_pte_fault() ...
1833                          */                      1649                          */
1834                         if (unlikely(folio_te    1650                         if (unlikely(folio_test_swapbacked(folio) !=
1835                                         folio    1651                                         folio_test_swapcache(folio))) {
1836                                 WARN_ON_ONCE(    1652                                 WARN_ON_ONCE(1);
1837                                 goto walk_abo !! 1653                                 ret = false;
                                                   >> 1654                                 /* We have to invalidate as we cleared the pte */
                                                   >> 1655                                 mmu_notifier_invalidate_range(mm, address,
                                                   >> 1656                                                         address + PAGE_SIZE);
                                                   >> 1657                                 page_vma_mapped_walk_done(&pvmw);
                                                   >> 1658                                 break;
1838                         }                        1659                         }
1839                                                  1660 
1840                         /* MADV_FREE page che    1661                         /* MADV_FREE page check */
1841                         if (!folio_test_swapb    1662                         if (!folio_test_swapbacked(folio)) {
1842                                 int ref_count    1663                                 int ref_count, map_count;
1843                                                  1664 
1844                                 /*               1665                                 /*
1845                                  * Synchroniz    1666                                  * Synchronize with gup_pte_range():
1846                                  * - clear PT    1667                                  * - clear PTE; barrier; read refcount
1847                                  * - inc refc    1668                                  * - inc refcount; barrier; read PTE
1848                                  */              1669                                  */
1849                                 smp_mb();        1670                                 smp_mb();
1850                                                  1671 
1851                                 ref_count = f    1672                                 ref_count = folio_ref_count(folio);
1852                                 map_count = f    1673                                 map_count = folio_mapcount(folio);
1853                                                  1674 
1854                                 /*               1675                                 /*
1855                                  * Order read    1676                                  * Order reads for page refcount and dirty flag
1856                                  * (see comme    1677                                  * (see comments in __remove_mapping()).
1857                                  */              1678                                  */
1858                                 smp_rmb();       1679                                 smp_rmb();
1859                                                  1680 
1860                                 /*               1681                                 /*
1861                                  * The only p    1682                                  * The only page refs must be one from isolation
1862                                  * plus the r    1683                                  * plus the rmap(s) (dropped by discard:).
1863                                  */              1684                                  */
1864                                 if (ref_count    1685                                 if (ref_count == 1 + map_count &&
1865                                     (!folio_t !! 1686                                     !folio_test_dirty(folio)) {
1866                                      /*       !! 1687                                         /* Invalidate as we cleared the pte */
1867                                       * Unlik !! 1688                                         mmu_notifier_invalidate_range(mm,
1868                                       * ones  !! 1689                                                 address, address + PAGE_SIZE);
1869                                       * been  << 
1870                                       */      << 
1871                                      (vma->vm << 
1872                                         dec_m    1690                                         dec_mm_counter(mm, MM_ANONPAGES);
1873                                         goto     1691                                         goto discard;
1874                                 }                1692                                 }
1875                                                  1693 
1876                                 /*               1694                                 /*
1877                                  * If the fol    1695                                  * If the folio was redirtied, it cannot be
1878                                  * discarded.    1696                                  * discarded. Remap the page to page table.
1879                                  */              1697                                  */
1880                                 set_pte_at(mm    1698                                 set_pte_at(mm, address, pvmw.pte, pteval);
1881                                 /*            !! 1699                                 folio_set_swapbacked(folio);
1882                                  * Unlike MAD !! 1700                                 ret = false;
1883                                  * never get  !! 1701                                 page_vma_mapped_walk_done(&pvmw);
1884                                  */           !! 1702                                 break;
1885                                 if (!(vma->vm << 
1886                                         folio << 
1887                                 goto walk_abo << 
1888                         }                        1703                         }
1889                                                  1704 
1890                         if (swap_duplicate(en    1705                         if (swap_duplicate(entry) < 0) {
1891                                 set_pte_at(mm    1706                                 set_pte_at(mm, address, pvmw.pte, pteval);
1892                                 goto walk_abo !! 1707                                 ret = false;
                                                   >> 1708                                 page_vma_mapped_walk_done(&pvmw);
                                                   >> 1709                                 break;
1893                         }                        1710                         }
1894                         if (arch_unmap_one(mm    1711                         if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1895                                 swap_free(ent    1712                                 swap_free(entry);
1896                                 set_pte_at(mm    1713                                 set_pte_at(mm, address, pvmw.pte, pteval);
1897                                 goto walk_abo !! 1714                                 ret = false;
                                                   >> 1715                                 page_vma_mapped_walk_done(&pvmw);
                                                   >> 1716                                 break;
1898                         }                        1717                         }
1899                                                  1718 
1900                         /* See folio_try_shar !! 1719                         /* See page_try_share_anon_rmap(): clear PTE first. */
1901                         if (anon_exclusive &&    1720                         if (anon_exclusive &&
1902                             folio_try_share_a !! 1721                             page_try_share_anon_rmap(subpage)) {
1903                                 swap_free(ent    1722                                 swap_free(entry);
1904                                 set_pte_at(mm    1723                                 set_pte_at(mm, address, pvmw.pte, pteval);
1905                                 goto walk_abo !! 1724                                 ret = false;
                                                   >> 1725                                 page_vma_mapped_walk_done(&pvmw);
                                                   >> 1726                                 break;
1906                         }                        1727                         }
                                                   >> 1728                         /*
                                                   >> 1729                          * Note: We *don't* remember if the page was mapped
                                                   >> 1730                          * exclusively in the swap pte if the architecture
                                                   >> 1731                          * doesn't support __HAVE_ARCH_PTE_SWP_EXCLUSIVE. In
                                                   >> 1732                          * that case, swapin code has to re-determine that
                                                   >> 1733                          * manually and might detect the page as possibly
                                                   >> 1734                          * shared, for example, if there are other references on
                                                   >> 1735                          * the page or if the page is under writeback. We made
                                                   >> 1736                          * sure that there are no GUP pins on the page that
                                                   >> 1737                          * would rely on it, so for GUP pins this is fine.
                                                   >> 1738                          */
1907                         if (list_empty(&mm->m    1739                         if (list_empty(&mm->mmlist)) {
1908                                 spin_lock(&mm    1740                                 spin_lock(&mmlist_lock);
1909                                 if (list_empt    1741                                 if (list_empty(&mm->mmlist))
1910                                         list_    1742                                         list_add(&mm->mmlist, &init_mm.mmlist);
1911                                 spin_unlock(&    1743                                 spin_unlock(&mmlist_lock);
1912                         }                        1744                         }
1913                         dec_mm_counter(mm, MM    1745                         dec_mm_counter(mm, MM_ANONPAGES);
1914                         inc_mm_counter(mm, MM    1746                         inc_mm_counter(mm, MM_SWAPENTS);
1915                         swp_pte = swp_entry_t    1747                         swp_pte = swp_entry_to_pte(entry);
1916                         if (anon_exclusive)      1748                         if (anon_exclusive)
1917                                 swp_pte = pte    1749                                 swp_pte = pte_swp_mkexclusive(swp_pte);
1918                         if (pte_soft_dirty(pt    1750                         if (pte_soft_dirty(pteval))
1919                                 swp_pte = pte    1751                                 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1920                         if (pte_uffd_wp(pteva    1752                         if (pte_uffd_wp(pteval))
1921                                 swp_pte = pte    1753                                 swp_pte = pte_swp_mkuffd_wp(swp_pte);
1922                         set_pte_at(mm, addres    1754                         set_pte_at(mm, address, pvmw.pte, swp_pte);
                                                   >> 1755                         /* Invalidate as we cleared the pte */
                                                   >> 1756                         mmu_notifier_invalidate_range(mm, address,
                                                   >> 1757                                                       address + PAGE_SIZE);
1923                 } else {                         1758                 } else {
1924                         /*                       1759                         /*
1925                          * This is a locked f    1760                          * This is a locked file-backed folio,
1926                          * so it cannot be re    1761                          * so it cannot be removed from the page
1927                          * cache and replaced    1762                          * cache and replaced by a new folio before
1928                          * mmu_notifier_inval    1763                          * mmu_notifier_invalidate_range_end, so no
1929                          * concurrent thread     1764                          * concurrent thread might update its page table
1930                          * to point at a new     1765                          * to point at a new folio while a device is
1931                          * still using this f    1766                          * still using this folio.
1932                          *                       1767                          *
1933                          * See Documentation/    1768                          * See Documentation/mm/mmu_notifier.rst
1934                          */                      1769                          */
1935                         dec_mm_counter(mm, mm !! 1770                         dec_mm_counter(mm, mm_counter_file(&folio->page));
1936                 }                                1771                 }
1937 discard:                                         1772 discard:
1938                 if (unlikely(folio_test_huget !! 1773                 /*
1939                         hugetlb_remove_rmap(f !! 1774                  * No need to call mmu_notifier_invalidate_range() it has been
1940                 else                          !! 1775                  * done above for all cases requiring it to happen under page
1941                         folio_remove_rmap_pte !! 1776                  * table lock before mmu_notifier_invalidate_range_end()
                                                   >> 1777                  *
                                                   >> 1778                  * See Documentation/mm/mmu_notifier.rst
                                                   >> 1779                  */
                                                   >> 1780                 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
1942                 if (vma->vm_flags & VM_LOCKED    1781                 if (vma->vm_flags & VM_LOCKED)
1943                         mlock_drain_local();  !! 1782                         mlock_page_drain_local();
1944                 folio_put(folio);                1783                 folio_put(folio);
1945                 continue;                     << 
1946 walk_abort:                                   << 
1947                 ret = false;                  << 
1948 walk_done:                                    << 
1949                 page_vma_mapped_walk_done(&pv << 
1950                 break;                        << 
1951         }                                        1784         }
1952                                                  1785 
1953         mmu_notifier_invalidate_range_end(&ra    1786         mmu_notifier_invalidate_range_end(&range);
1954                                                  1787 
1955         return ret;                              1788         return ret;
1956 }                                                1789 }
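
On the newer (left-hand) side of this hunk, the error handling in try_to_unmap_one() is funnelled through shared walk_abort/walk_done labels instead of repeating the ret = false / page_vma_mapped_walk_done() / break sequence at every failure site. A minimal, self-contained sketch of that control-flow shape, using hypothetical helpers rather than the kernel's real walk state:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real page_vma_mapped_walk() machinery. */
struct walk_state { int pos; };

static bool walk_next(struct walk_state *w)         { return w->pos < 4; }
static bool pmd_was_unshared(struct walk_state *w)  { return w->pos == 3; }
static bool unmap_one_pte(struct walk_state *w)     { return w->pos != 2; }
static void walk_done_cleanup(struct walk_state *w) { (void)w; /* page_vma_mapped_walk_done() analogue */ }

static bool unmap_all(struct walk_state *w)
{
	bool ret = true;

	while (walk_next(w)) {
		if (pmd_was_unshared(w))
			goto walk_done;		/* stop early, but still report success */
		if (!unmap_one_pte(w))
			goto walk_abort;	/* every failure path funnels here */
		w->pos++;
		continue;
walk_abort:
		ret = false;
walk_done:
		walk_done_cleanup(w);
		break;
	}
	return ret;
}

int main(void)
{
	struct walk_state w = { 0 };
	printf("%s\n", unmap_all(&w) ? "success" : "aborted");
	return 0;
}

In the kernel function itself, walk_done is reached from the shared-PMD branch (the goto after huge_pmd_unshare() succeeds) and walk_abort from the swap-entry, arch_unmap_one() and anon-rmap failure branches, as the left column above shows.
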
1957                                                  1790 
1958 static bool invalid_migration_vma(struct vm_a    1791 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
1959 {                                                1792 {
1960         return vma_is_temporary_stack(vma);      1793         return vma_is_temporary_stack(vma);
1961 }                                                1794 }
1962                                                  1795 
1963 static int folio_not_mapped(struct folio *fol    1796 static int folio_not_mapped(struct folio *folio)
1964 {                                                1797 {
1965         return !folio_mapped(folio);             1798         return !folio_mapped(folio);
1966 }                                                1799 }
1967                                                  1800 
1968 /**                                              1801 /**
1969  * try_to_unmap - Try to remove all page tabl    1802  * try_to_unmap - Try to remove all page table mappings to a folio.
1970  * @folio: The folio to unmap.                   1803  * @folio: The folio to unmap.
1971  * @flags: action and flags                      1804  * @flags: action and flags
1972  *                                               1805  *
1973  * Tries to remove all the page table entries    1806  * Tries to remove all the page table entries which are mapping this
1974  * folio.  It is the caller's responsibility     1807  * folio.  It is the caller's responsibility to check if the folio is
1975  * still mapped if needed (use TTU_SYNC to pr    1808  * still mapped if needed (use TTU_SYNC to prevent accounting races).
1976  *                                               1809  *
1977  * Context: Caller must hold the folio lock.     1810  * Context: Caller must hold the folio lock.
1978  */                                              1811  */
1979 void try_to_unmap(struct folio *folio, enum t    1812 void try_to_unmap(struct folio *folio, enum ttu_flags flags)
1980 {                                                1813 {
1981         struct rmap_walk_control rwc = {         1814         struct rmap_walk_control rwc = {
1982                 .rmap_one = try_to_unmap_one,    1815                 .rmap_one = try_to_unmap_one,
1983                 .arg = (void *)flags,            1816                 .arg = (void *)flags,
1984                 .done = folio_not_mapped,        1817                 .done = folio_not_mapped,
1985                 .anon_lock = folio_lock_anon_    1818                 .anon_lock = folio_lock_anon_vma_read,
1986         };                                       1819         };
1987                                                  1820 
1988         if (flags & TTU_RMAP_LOCKED)             1821         if (flags & TTU_RMAP_LOCKED)
1989                 rmap_walk_locked(folio, &rwc)    1822                 rmap_walk_locked(folio, &rwc);
1990         else                                     1823         else
1991                 rmap_walk(folio, &rwc);          1824                 rmap_walk(folio, &rwc);
1992 }                                                1825 }
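
The kernel-doc above states the calling convention: the folio lock must be held, and any post-unmap check of the mapping state is the caller's job, with TTU_SYNC available to close the accounting race. A hedged sketch of a caller honouring that contract (illustrative only; the helper name unmap_locked_folio is made up here, and real callers such as the reclaim and migration paths do considerably more work):

#include <linux/mm.h>
#include <linux/rmap.h>

/* Illustrative only: not a function that exists in the kernel tree. */
static bool unmap_locked_folio(struct folio *folio)
{
	/* Contract from the kernel-doc: the caller holds the folio lock. */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	/* TTU_SYNC makes the subsequent folio_mapped() check reliable. */
	try_to_unmap(folio, TTU_SYNC);

	/* try_to_unmap() returns void; success is judged by re-checking. */
	return !folio_mapped(folio);
}
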
1993                                                  1826 
1994 /*                                               1827 /*
1995  * @arg: enum ttu_flags will be passed to thi    1828  * @arg: enum ttu_flags will be passed to this argument.
1996  *                                               1829  *
1997  * If TTU_SPLIT_HUGE_PMD is specified any PMD    1830  * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
1998  * containing migration entries.                 1831  * containing migration entries.
1999  */                                              1832  */
2000 static bool try_to_migrate_one(struct folio *    1833 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
2001                      unsigned long address, v    1834                      unsigned long address, void *arg)
2002 {                                                1835 {
2003         struct mm_struct *mm = vma->vm_mm;       1836         struct mm_struct *mm = vma->vm_mm;
2004         DEFINE_FOLIO_VMA_WALK(pvmw, folio, vm    1837         DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
2005         pte_t pteval;                            1838         pte_t pteval;
2006         struct page *subpage;                    1839         struct page *subpage;
2007         bool anon_exclusive, ret = true;         1840         bool anon_exclusive, ret = true;
2008         struct mmu_notifier_range range;         1841         struct mmu_notifier_range range;
2009         enum ttu_flags flags = (enum ttu_flag    1842         enum ttu_flags flags = (enum ttu_flags)(long)arg;
2010         unsigned long pfn;                    << 
2011         unsigned long hsz = 0;                << 
2012                                                  1843 
2013         /*                                       1844         /*
2014          * When racing against e.g. zap_pte_r    1845          * When racing against e.g. zap_pte_range() on another cpu,
2015          * in between its ptep_get_and_clear_ !! 1846          * in between its ptep_get_and_clear_full() and page_remove_rmap(),
2016          * try_to_migrate() may return before    1847          * try_to_migrate() may return before page_mapped() has become false,
2017          * if page table locking is skipped:     1848          * if page table locking is skipped: use TTU_SYNC to wait for that.
2018          */                                      1849          */
2019         if (flags & TTU_SYNC)                    1850         if (flags & TTU_SYNC)
2020                 pvmw.flags = PVMW_SYNC;          1851                 pvmw.flags = PVMW_SYNC;
2021                                                  1852 
2022         /*                                       1853         /*
2023          * unmap_page() in mm/huge_memory.c i    1854          * unmap_page() in mm/huge_memory.c is the only user of migration with
2024          * TTU_SPLIT_HUGE_PMD and it wants to    1855          * TTU_SPLIT_HUGE_PMD and it wants to freeze.
2025          */                                      1856          */
2026         if (flags & TTU_SPLIT_HUGE_PMD)          1857         if (flags & TTU_SPLIT_HUGE_PMD)
2027                 split_huge_pmd_address(vma, a    1858                 split_huge_pmd_address(vma, address, true, folio);
2028                                                  1859 
2029         /*                                       1860         /*
2030          * For THP, we have to assume the wor    1861          * For THP, we have to assume the worst case, i.e., pmd for invalidation.
2031          * For hugetlb, it could be much wors    1862          * For hugetlb, it could be much worse if we need to do pud
2032          * invalidation in the case of pmd sh    1863          * invalidation in the case of pmd sharing.
2033          *                                       1864          *
2034          * Note that the page can not be free    1865          * Note that the page cannot be freed in this function, as the caller of
2035          * try_to_unmap() must hold a referen    1866          * try_to_unmap() must hold a reference on the page.
2036          */                                      1867          */
2037         range.end = vma_address_end(&pvmw);      1868         range.end = vma_address_end(&pvmw);
2038         mmu_notifier_range_init(&range, MMU_N !! 1869         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
2039                                 address, rang    1870                                 address, range.end);
2040         if (folio_test_hugetlb(folio)) {         1871         if (folio_test_hugetlb(folio)) {
2041                 /*                               1872                 /*
2042                  * If sharing is possible, st    1873                  * If sharing is possible, start and end will be adjusted
2043                  * accordingly.                  1874                  * accordingly.
2044                  */                              1875                  */
2045                 adjust_range_if_pmd_sharing_p    1876                 adjust_range_if_pmd_sharing_possible(vma, &range.start,
2046                                                  1877                                                      &range.end);
2047                                               << 
2048                 /* We need the huge page size << 
2049                 hsz = huge_page_size(hstate_v << 
2050         }                                        1878         }
2051         mmu_notifier_invalidate_range_start(&    1879         mmu_notifier_invalidate_range_start(&range);
2052                                                  1880 
2053         while (page_vma_mapped_walk(&pvmw)) {    1881         while (page_vma_mapped_walk(&pvmw)) {
2054 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION          1882 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2055                 /* PMD-mapped THP migration e    1883                 /* PMD-mapped THP migration entry */
2056                 if (!pvmw.pte) {                 1884                 if (!pvmw.pte) {
2057                         subpage = folio_page(    1885                         subpage = folio_page(folio,
2058                                 pmd_pfn(*pvmw    1886                                 pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
2059                         VM_BUG_ON_FOLIO(folio    1887                         VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
2060                                         !foli    1888                                         !folio_test_pmd_mappable(folio), folio);
2061                                                  1889 
2062                         if (set_pmd_migration    1890                         if (set_pmd_migration_entry(&pvmw, subpage)) {
2063                                 ret = false;     1891                                 ret = false;
2064                                 page_vma_mapp    1892                                 page_vma_mapped_walk_done(&pvmw);
2065                                 break;           1893                                 break;
2066                         }                        1894                         }
2067                         continue;                1895                         continue;
2068                 }                                1896                 }
2069 #endif                                           1897 #endif
2070                                                  1898 
2071                 /* Unexpected PMD-mapped THP?    1899                 /* Unexpected PMD-mapped THP? */
2072                 VM_BUG_ON_FOLIO(!pvmw.pte, fo    1900                 VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2073                                                  1901 
2074                 pfn = pte_pfn(ptep_get(pvmw.p << 
2075                                               << 
2076                 if (folio_is_zone_device(foli    1902                 if (folio_is_zone_device(folio)) {
2077                         /*                       1903                         /*
2078                          * Our PTE is a non-p    1904                          * Our PTE is a non-present device exclusive entry and
2079                          * calculating the su    1905                          * calculating the subpage as for the common case would
2080                          * result in an inval    1906                          * result in an invalid pointer.
2081                          *                       1907                          *
2082                          * Since only PAGE_SI    1908                          * Since only PAGE_SIZE pages can currently be
2083                          * migrated, just set    1909                          * migrated, just set it to page. This will need to be
2084                          * changed when hugep    1910                          * changed when hugepage migrations to device private
2085                          * memory are support    1911                          * memory are supported.
2086                          */                      1912                          */
2087                         VM_BUG_ON_FOLIO(folio    1913                         VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio);
2088                         subpage = &folio->pag    1914                         subpage = &folio->page;
2089                 } else {                         1915                 } else {
2090                         subpage = folio_page( !! 1916                         subpage = folio_page(folio,
                                                   >> 1917                                         pte_pfn(*pvmw.pte) - folio_pfn(folio));
2091                 }                                1918                 }
2092                 address = pvmw.address;          1919                 address = pvmw.address;
2093                 anon_exclusive = folio_test_a    1920                 anon_exclusive = folio_test_anon(folio) &&
2094                                  PageAnonExcl    1921                                  PageAnonExclusive(subpage);
2095                                                  1922 
2096                 if (folio_test_hugetlb(folio)    1923                 if (folio_test_hugetlb(folio)) {
2097                         bool anon = folio_tes    1924                         bool anon = folio_test_anon(folio);
2098                                                  1925 
2099                         /*                       1926                         /*
2100                          * huge_pmd_unshare m    1927                          * huge_pmd_unshare may unmap an entire PMD page.
2101                          * There is no way of    1928                          * There is no way of knowing exactly which PMDs may
2102                          * be cached for this    1929                          * be cached for this mm, so we must flush them all.
2103                          * start/end were alr    1930                          * start/end were already adjusted above to cover this
2104                          * range.                1931                          * range.
2105                          */                      1932                          */
2106                         flush_cache_range(vma    1933                         flush_cache_range(vma, range.start, range.end);
2107                                                  1934 
2108                         /*                       1935                         /*
2109                          * To call huge_pmd_u    1936                          * To call huge_pmd_unshare, i_mmap_rwsem must be
2110                          * held in write mode    1937                          * held in write mode.  Caller needs to explicitly
2111                          * do this outside rm    1938                          * do this outside rmap routines.
2112                          *                       1939                          *
2113                          * We also must hold     1940                          * We also must hold hugetlb vma_lock in write mode.
2114                          * Lock order dictate    1941                          * Lock order dictates acquiring vma_lock BEFORE
2115                          * i_mmap_rwsem.  We     1942                          * i_mmap_rwsem.  We can only try lock here and
2116                          * fail if unsuccessf    1943                          * fail if unsuccessful.
2117                          */                      1944                          */
2118                         if (!anon) {             1945                         if (!anon) {
2119                                 VM_BUG_ON(!(f    1946                                 VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
2120                                 if (!hugetlb_    1947                                 if (!hugetlb_vma_trylock_write(vma)) {
2121                                         page_    1948                                         page_vma_mapped_walk_done(&pvmw);
2122                                         ret =    1949                                         ret = false;
2123                                         break    1950                                         break;
2124                                 }                1951                                 }
2125                                 if (huge_pmd_    1952                                 if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
2126                                         huget    1953                                         hugetlb_vma_unlock_write(vma);
2127                                         flush    1954                                         flush_tlb_range(vma,
2128                                                  1955                                                 range.start, range.end);
                                                   >> 1956                                         mmu_notifier_invalidate_range(mm,
                                                   >> 1957                                                 range.start, range.end);
2129                                                  1958 
2130                                         /*       1959                                         /*
2131                                          * Th    1960                                          * The ref count of the PMD page was
2132                                          * dr    1961                                          * dropped which is part of the way map
2133                                          * co    1962                                          * counting is done for shared PMDs.
2134                                          * Re    1963                                          * Return 'true' here.  When there is
2135                                          * no    1964                                          * no other sharing, huge_pmd_unshare
2136                                          * re    1965                                          * returns false and we will unmap the
2137                                          * ac    1966                                          * actual page and drop map count
2138                                          * to    1967                                          * to zero.
2139                                          */      1968                                          */
2140                                         page_    1969                                         page_vma_mapped_walk_done(&pvmw);
2141                                         break    1970                                         break;
2142                                 }                1971                                 }
2143                                 hugetlb_vma_u    1972                                 hugetlb_vma_unlock_write(vma);
2144                         }                        1973                         }
2145                         /* Nuke the hugetlb p    1974                         /* Nuke the hugetlb page table entry */
2146                         pteval = huge_ptep_cl    1975                         pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
2147                 } else {                         1976                 } else {
2148                         flush_cache_page(vma, !! 1977                         flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
2149                         /* Nuke the page tabl    1978                         /* Nuke the page table entry. */
2150                         if (should_defer_flus !! 1979                         pteval = ptep_clear_flush(vma, address, pvmw.pte);
2151                                 /*            << 
2152                                  * We clear t << 
2153                                  * a remote C << 
2154                                  * If the ent << 
2155                                  * architectu << 
2156                                  * transition << 
2157                                  * and traps  << 
2158                                  */           << 
2159                                 pteval = ptep << 
2160                                               << 
2161                                 set_tlb_ubc_f << 
2162                         } else {              << 
2163                                 pteval = ptep << 
2164                         }                     << 
2165                 }                                1980                 }
2166                                                  1981 
2167                 /* Set the dirty flag on the     1982                 /* Set the dirty flag on the folio now the pte is gone. */
2168                 if (pte_dirty(pteval))           1983                 if (pte_dirty(pteval))
2169                         folio_mark_dirty(foli    1984                         folio_mark_dirty(folio);
2170                                                  1985 
2171                 /* Update high watermark befo    1986                 /* Update high watermark before we lower rss */
2172                 update_hiwater_rss(mm);          1987                 update_hiwater_rss(mm);
2173                                                  1988 
2174                 if (folio_is_device_private(f    1989                 if (folio_is_device_private(folio)) {
2175                         unsigned long pfn = f    1990                         unsigned long pfn = folio_pfn(folio);
2176                         swp_entry_t entry;       1991                         swp_entry_t entry;
2177                         pte_t swp_pte;           1992                         pte_t swp_pte;
2178                                                  1993 
2179                         if (anon_exclusive)      1994                         if (anon_exclusive)
2180                                 WARN_ON_ONCE( !! 1995                                 BUG_ON(page_try_share_anon_rmap(subpage));
2181                                               << 
2182                                                  1996 
2183                         /*                       1997                         /*
2184                          * Store the pfn of t    1998                          * Store the pfn of the page in a special migration
2185                          * pte. do_swap_page(    1999                          * pte. do_swap_page() will wait until the migration
2186                          * pte is removed and    2000                          * pte is removed and then restart fault handling.
2187                          */                      2001                          */
2188                         entry = pte_to_swp_en    2002                         entry = pte_to_swp_entry(pteval);
2189                         if (is_writable_devic    2003                         if (is_writable_device_private_entry(entry))
2190                                 entry = make_    2004                                 entry = make_writable_migration_entry(pfn);
2191                         else if (anon_exclusi    2005                         else if (anon_exclusive)
2192                                 entry = make_    2006                                 entry = make_readable_exclusive_migration_entry(pfn);
2193                         else                     2007                         else
2194                                 entry = make_    2008                                 entry = make_readable_migration_entry(pfn);
2195                         swp_pte = swp_entry_t    2009                         swp_pte = swp_entry_to_pte(entry);
2196                                                  2010 
2197                         /*                       2011                         /*
2198                          * pteval maps a zone    2012                          * pteval maps a zone device page and is therefore
2199                          * a swap pte.           2013                          * a swap pte.
2200                          */                      2014                          */
2201                         if (pte_swp_soft_dirt    2015                         if (pte_swp_soft_dirty(pteval))
2202                                 swp_pte = pte    2016                                 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2203                         if (pte_swp_uffd_wp(p    2017                         if (pte_swp_uffd_wp(pteval))
2204                                 swp_pte = pte    2018                                 swp_pte = pte_swp_mkuffd_wp(swp_pte);
2205                         set_pte_at(mm, pvmw.a    2019                         set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
2206                         trace_set_migration_p    2020                         trace_set_migration_pte(pvmw.address, pte_val(swp_pte),
2207                                               !! 2021                                                 compound_order(&folio->page));
2208                         /*                       2022                         /*
2209                          * No need to invalid    2023                          * No need to invalidate here; it will synchronize
2210                          * against the specia    2024                          * against the special swap migration pte.
2211                          */                      2025                          */
2212                 } else if (PageHWPoison(subpa    2026                 } else if (PageHWPoison(subpage)) {
2213                         pteval = swp_entry_to    2027                         pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
2214                         if (folio_test_hugetl    2028                         if (folio_test_hugetlb(folio)) {
2215                                 hugetlb_count    2029                                 hugetlb_count_sub(folio_nr_pages(folio), mm);
2216                                 set_huge_pte_ !! 2030                                 set_huge_pte_at(mm, address, pvmw.pte, pteval);
2217                                               << 
2218                         } else {                 2031                         } else {
2219                                 dec_mm_counte !! 2032                                 dec_mm_counter(mm, mm_counter(&folio->page));
2220                                 set_pte_at(mm    2033                                 set_pte_at(mm, address, pvmw.pte, pteval);
2221                         }                        2034                         }
2222                                                  2035 
2223                 } else if (pte_unused(pteval)    2036                 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
2224                         /*                       2037                         /*
2225                          * The guest indicate    2038                          * The guest indicated that the page content is of no
2226                          * interest anymore.     2039                          * interest anymore. Simply discard the pte, vmscan
2227                          * will take care of     2040                          * will take care of the rest.
2228                          * A future reference    2041                          * A future reference will then fault in a new zero
2229                          * page. When userfau    2042                          * page. When userfaultfd is active, we must not drop
2230                          * this page though,     2043                          * this page though, as its main user (postcopy
2231                          * migration) will no    2044                          * migration) will not expect userfaults on already
2232                          * copied pages.         2045                          * copied pages.
2233                          */                      2046                          */
2234                         dec_mm_counter(mm, mm !! 2047                         dec_mm_counter(mm, mm_counter(&folio->page));
                                                   >> 2048                         /* We have to invalidate as we cleared the pte */
                                                   >> 2049                         mmu_notifier_invalidate_range(mm, address,
                                                   >> 2050                                                       address + PAGE_SIZE);
2235                 } else {                         2051                 } else {
2236                         swp_entry_t entry;       2052                         swp_entry_t entry;
2237                         pte_t swp_pte;           2053                         pte_t swp_pte;
2238                                                  2054 
2239                         if (arch_unmap_one(mm    2055                         if (arch_unmap_one(mm, vma, address, pteval) < 0) {
2240                                 if (folio_tes    2056                                 if (folio_test_hugetlb(folio))
2241                                         set_h !! 2057                                         set_huge_pte_at(mm, address, pvmw.pte, pteval);
2242                                               << 
2243                                 else             2058                                 else
2244                                         set_p    2059                                         set_pte_at(mm, address, pvmw.pte, pteval);
2245                                 ret = false;     2060                                 ret = false;
2246                                 page_vma_mapp    2061                                 page_vma_mapped_walk_done(&pvmw);
2247                                 break;           2062                                 break;
2248                         }                        2063                         }
2249                         VM_BUG_ON_PAGE(pte_wr    2064                         VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) &&
2250                                        !anon_    2065                                        !anon_exclusive, subpage);
2251                                                  2066 
2252                         /* See folio_try_shar !! 2067                         /* See page_try_share_anon_rmap(): clear PTE first. */
2253                         if (folio_test_hugetl !! 2068                         if (anon_exclusive &&
2254                                 if (anon_excl !! 2069                             page_try_share_anon_rmap(subpage)) {
2255                                     hugetlb_t !! 2070                                 if (folio_test_hugetlb(folio))
2256                                         set_h !! 2071                                         set_huge_pte_at(mm, address, pvmw.pte, pteval);
2257                                               !! 2072                                 else
2258                                         ret = !! 2073                                         set_pte_at(mm, address, pvmw.pte, pteval);
2259                                         page_ << 
2260                                         break << 
2261                                 }             << 
2262                         } else if (anon_exclu << 
2263                                    folio_try_ << 
2264                                 set_pte_at(mm << 
2265                                 ret = false;     2074                                 ret = false;
2266                                 page_vma_mapp    2075                                 page_vma_mapped_walk_done(&pvmw);
2267                                 break;           2076                                 break;
2268                         }                        2077                         }
2269                                                  2078 
2270                         /*                       2079                         /*
2271                          * Store the pfn of t    2080                          * Store the pfn of the page in a special migration
2272                          * pte. do_swap_page(    2081                          * pte. do_swap_page() will wait until the migration
2273                          * pte is removed and    2082                          * pte is removed and then restart fault handling.
2274                          */                      2083                          */
2275                         if (pte_write(pteval)    2084                         if (pte_write(pteval))
2276                                 entry = make_    2085                                 entry = make_writable_migration_entry(
2277                                                  2086                                                         page_to_pfn(subpage));
2278                         else if (anon_exclusi    2087                         else if (anon_exclusive)
2279                                 entry = make_    2088                                 entry = make_readable_exclusive_migration_entry(
2280                                                  2089                                                         page_to_pfn(subpage));
2281                         else                     2090                         else
2282                                 entry = make_    2091                                 entry = make_readable_migration_entry(
2283                                                  2092                                                         page_to_pfn(subpage));
2284                         if (pte_young(pteval)    2093                         if (pte_young(pteval))
2285                                 entry = make_    2094                                 entry = make_migration_entry_young(entry);
2286                         if (pte_dirty(pteval)    2095                         if (pte_dirty(pteval))
2287                                 entry = make_    2096                                 entry = make_migration_entry_dirty(entry);
2288                         swp_pte = swp_entry_t    2097                         swp_pte = swp_entry_to_pte(entry);
2289                         if (pte_soft_dirty(pt    2098                         if (pte_soft_dirty(pteval))
2290                                 swp_pte = pte    2099                                 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2291                         if (pte_uffd_wp(pteva    2100                         if (pte_uffd_wp(pteval))
2292                                 swp_pte = pte    2101                                 swp_pte = pte_swp_mkuffd_wp(swp_pte);
2293                         if (folio_test_hugetl    2102                         if (folio_test_hugetlb(folio))
2294                                 set_huge_pte_ !! 2103                                 set_huge_pte_at(mm, address, pvmw.pte, swp_pte);
2295                                               << 
2296                         else                     2104                         else
2297                                 set_pte_at(mm    2105                                 set_pte_at(mm, address, pvmw.pte, swp_pte);
2298                         trace_set_migration_p    2106                         trace_set_migration_pte(address, pte_val(swp_pte),
2299                                               !! 2107                                                 compound_order(&folio->page));
2300                         /*                       2108                         /*
2301                          * No need to invalid    2109                          * No need to invalidate here it will synchronize on
2302                          * against the specia    2110                          * against the special swap migration pte.
2303                          */                      2111                          */
2304                 }                                2112                 }
2305                                                  2113 
2306                 if (unlikely(folio_test_huget !! 2114                 /*
2307                         hugetlb_remove_rmap(f !! 2115                  * No need to call mmu_notifier_invalidate_range() it has be
2308                 else                          !! 2116                  * done above for all cases requiring it to happen under page
2309                         folio_remove_rmap_pte !! 2117                  * table lock before mmu_notifier_invalidate_range_end()
                                                   >> 2118                  *
                                                   >> 2119                  * See Documentation/mm/mmu_notifier.rst
                                                   >> 2120                  */
                                                   >> 2121                 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
2310                 if (vma->vm_flags & VM_LOCKED    2122                 if (vma->vm_flags & VM_LOCKED)
2311                         mlock_drain_local();  !! 2123                         mlock_page_drain_local();
2312                 folio_put(folio);                2124                 folio_put(folio);
2313         }                                        2125         }
2314                                                  2126 
2315         mmu_notifier_invalidate_range_end(&ra    2127         mmu_notifier_invalidate_range_end(&range);
2316                                                  2128 
2317         return ret;                              2129         return ret;
2318 }                                                2130 }
2319                                                  2131 
2320 /**                                              2132 /**
2321  * try_to_migrate - try to replace all page t    2133  * try_to_migrate - try to replace all page table mappings with swap entries
2322  * @folio: the folio to replace page table en    2134  * @folio: the folio to replace page table entries for
2323  * @flags: action and flags                      2135  * @flags: action and flags
2324  *                                               2136  *
2325  * Tries to remove all the page table entries    2137  * Tries to remove all the page table entries which are mapping this folio and
2326  * replace them with special swap entries. Ca    2138  * replace them with special swap entries. Caller must hold the folio lock.
2327  */                                              2139  */
2328 void try_to_migrate(struct folio *folio, enum    2140 void try_to_migrate(struct folio *folio, enum ttu_flags flags)
2329 {                                                2141 {
2330         struct rmap_walk_control rwc = {         2142         struct rmap_walk_control rwc = {
2331                 .rmap_one = try_to_migrate_on    2143                 .rmap_one = try_to_migrate_one,
2332                 .arg = (void *)flags,            2144                 .arg = (void *)flags,
2333                 .done = folio_not_mapped,        2145                 .done = folio_not_mapped,
2334                 .anon_lock = folio_lock_anon_    2146                 .anon_lock = folio_lock_anon_vma_read,
2335         };                                       2147         };
2336                                                  2148 
2337         /*                                       2149         /*
2338          * Migration always ignores mlock and    2150          * Migration always ignores mlock and only supports TTU_RMAP_LOCKED and
2339          * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and  !! 2151          * TTU_SPLIT_HUGE_PMD and TTU_SYNC flags.
2340          */                                      2152          */
2341         if (WARN_ON_ONCE(flags & ~(TTU_RMAP_L    2153         if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2342                                         TTU_S !! 2154                                         TTU_SYNC)))
2343                 return;                          2155                 return;
2344                                                  2156 
2345         if (folio_is_zone_device(folio) &&       2157         if (folio_is_zone_device(folio) &&
2346             (!folio_is_device_private(folio)     2158             (!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
2347                 return;                          2159                 return;
2348                                                  2160 
2349         /*                                       2161         /*
2350          * During exec, a temporary VMA is se    2162          * During exec, a temporary VMA is setup and later moved.
2351          * The VMA is moved under the anon_vm    2163          * The VMA is moved under the anon_vma lock but not the
2352          * page tables leading to a race wher    2164          * page tables leading to a race where migration cannot
2353          * find the migration ptes. Rather th    2165          * find the migration ptes. Rather than increasing the
2354          * locking requirements of exec(), mi    2166          * locking requirements of exec(), migration skips
2355          * temporary VMAs until after exec()     2167          * temporary VMAs until after exec() completes.
2356          */                                      2168          */
2357         if (!folio_test_ksm(folio) && folio_t    2169         if (!folio_test_ksm(folio) && folio_test_anon(folio))
2358                 rwc.invalid_vma = invalid_mig    2170                 rwc.invalid_vma = invalid_migration_vma;
2359                                                  2171 
2360         if (flags & TTU_RMAP_LOCKED)             2172         if (flags & TTU_RMAP_LOCKED)
2361                 rmap_walk_locked(folio, &rwc)    2173                 rmap_walk_locked(folio, &rwc);
2362         else                                     2174         else
2363                 rmap_walk(folio, &rwc);          2175                 rmap_walk(folio, &rwc);
2364 }                                                2176 }
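
The kernel-doc above states the contract (caller holds the folio lock) but not the call pattern. As a hedged illustration (an editor's sketch, not part of mm/rmap.c): a caller holding a folio reference could drive try_to_migrate() as below, checking success via the mapcount since the function itself returns void. unmap_my_folio() is a hypothetical helper; try_to_migrate(), folio_trylock()/folio_unlock() and folio_mapped() are the real interfaces.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>

/* Editor's sketch, not part of mm/rmap.c. */
static bool unmap_my_folio(struct folio *folio)
{
	bool unmapped;

	if (!folio_trylock(folio))
		return false;

	/* Replace every PTE/PMD mapping of the folio with a migration entry. */
	try_to_migrate(folio, 0);

	/* try_to_migrate() returns void; success shows up in the mapcount. */
	unmapped = !folio_mapped(folio);
	folio_unlock(folio);
	return unmapped;
}
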
2365                                                  2177 
2366 #ifdef CONFIG_DEVICE_PRIVATE                     2178 #ifdef CONFIG_DEVICE_PRIVATE
2367 struct make_exclusive_args {                     2179 struct make_exclusive_args {
2368         struct mm_struct *mm;                    2180         struct mm_struct *mm;
2369         unsigned long address;                   2181         unsigned long address;
2370         void *owner;                             2182         void *owner;
2371         bool valid;                              2183         bool valid;
2372 };                                               2184 };
2373                                                  2185 
2374 static bool page_make_device_exclusive_one(st    2186 static bool page_make_device_exclusive_one(struct folio *folio,
2375                 struct vm_area_struct *vma, u    2187                 struct vm_area_struct *vma, unsigned long address, void *priv)
2376 {                                                2188 {
2377         struct mm_struct *mm = vma->vm_mm;       2189         struct mm_struct *mm = vma->vm_mm;
2378         DEFINE_FOLIO_VMA_WALK(pvmw, folio, vm    2190         DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
2379         struct make_exclusive_args *args = pr    2191         struct make_exclusive_args *args = priv;
2380         pte_t pteval;                            2192         pte_t pteval;
2381         struct page *subpage;                    2193         struct page *subpage;
2382         bool ret = true;                         2194         bool ret = true;
2383         struct mmu_notifier_range range;         2195         struct mmu_notifier_range range;
2384         swp_entry_t entry;                       2196         swp_entry_t entry;
2385         pte_t swp_pte;                           2197         pte_t swp_pte;
2386         pte_t ptent;                          << 
2387                                                  2198 
2388         mmu_notifier_range_init_owner(&range, !! 2199         mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
2389                                       vma->vm    2200                                       vma->vm_mm, address, min(vma->vm_end,
2390                                       address    2201                                       address + folio_size(folio)),
2391                                       args->o    2202                                       args->owner);
2392         mmu_notifier_invalidate_range_start(&    2203         mmu_notifier_invalidate_range_start(&range);
2393                                                  2204 
2394         while (page_vma_mapped_walk(&pvmw)) {    2205         while (page_vma_mapped_walk(&pvmw)) {
2395                 /* Unexpected PMD-mapped THP?    2206                 /* Unexpected PMD-mapped THP? */
2396                 VM_BUG_ON_FOLIO(!pvmw.pte, fo    2207                 VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2397                                                  2208 
2398                 ptent = ptep_get(pvmw.pte);   !! 2209                 if (!pte_present(*pvmw.pte)) {
2399                 if (!pte_present(ptent)) {    << 
2400                         ret = false;             2210                         ret = false;
2401                         page_vma_mapped_walk_    2211                         page_vma_mapped_walk_done(&pvmw);
2402                         break;                   2212                         break;
2403                 }                                2213                 }
2404                                                  2214 
2405                 subpage = folio_page(folio,      2215                 subpage = folio_page(folio,
2406                                 pte_pfn(ptent !! 2216                                 pte_pfn(*pvmw.pte) - folio_pfn(folio));
2407                 address = pvmw.address;          2217                 address = pvmw.address;
2408                                                  2218 
2409                 /* Nuke the page table entry.    2219                 /* Nuke the page table entry. */
2410                 flush_cache_page(vma, address !! 2220                 flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
2411                 pteval = ptep_clear_flush(vma    2221                 pteval = ptep_clear_flush(vma, address, pvmw.pte);
2412                                                  2222 
2413                 /* Set the dirty flag on the     2223                 /* Set the dirty flag on the folio now the pte is gone. */
2414                 if (pte_dirty(pteval))           2224                 if (pte_dirty(pteval))
2415                         folio_mark_dirty(foli    2225                         folio_mark_dirty(folio);
2416                                                  2226 
2417                 /*                               2227                 /*
2418                  * Check that our target page    2228                  * Check that our target page is still mapped at the expected
2419                  * address.                      2229                  * address.
2420                  */                              2230                  */
2421                 if (args->mm == mm && args->a    2231                 if (args->mm == mm && args->address == address &&
2422                     pte_write(pteval))           2232                     pte_write(pteval))
2423                         args->valid = true;      2233                         args->valid = true;
2424                                                  2234 
2425                 /*                               2235                 /*
2426                  * Store the pfn of the page     2236                  * Store the pfn of the page in a special migration
2427                  * pte. do_swap_page() will w    2237                  * pte. do_swap_page() will wait until the migration
2428                  * pte is removed and then re    2238                  * pte is removed and then restart fault handling.
2429                  */                              2239                  */
2430                 if (pte_write(pteval))           2240                 if (pte_write(pteval))
2431                         entry = make_writable    2241                         entry = make_writable_device_exclusive_entry(
2432                                                  2242                                                         page_to_pfn(subpage));
2433                 else                             2243                 else
2434                         entry = make_readable    2244                         entry = make_readable_device_exclusive_entry(
2435                                                  2245                                                         page_to_pfn(subpage));
2436                 swp_pte = swp_entry_to_pte(en    2246                 swp_pte = swp_entry_to_pte(entry);
2437                 if (pte_soft_dirty(pteval))      2247                 if (pte_soft_dirty(pteval))
2438                         swp_pte = pte_swp_mks    2248                         swp_pte = pte_swp_mksoft_dirty(swp_pte);
2439                 if (pte_uffd_wp(pteval))         2249                 if (pte_uffd_wp(pteval))
2440                         swp_pte = pte_swp_mku    2250                         swp_pte = pte_swp_mkuffd_wp(swp_pte);
2441                                                  2251 
2442                 set_pte_at(mm, address, pvmw.    2252                 set_pte_at(mm, address, pvmw.pte, swp_pte);
2443                                                  2253 
2444                 /*                               2254                 /*
2445                  * There is a reference on th    2255                  * There is a reference on the page for the swap entry which has
2446                  * been removed, so shouldn't    2256                  * been removed, so shouldn't take another.
2447                  */                              2257                  */
2448                 folio_remove_rmap_pte(folio,  !! 2258                 page_remove_rmap(subpage, vma, false);
2449         }                                        2259         }
2450                                                  2260 
2451         mmu_notifier_invalidate_range_end(&ra    2261         mmu_notifier_invalidate_range_end(&range);
2452                                                  2262 
2453         return ret;                              2263         return ret;
2454 }                                                2264 }
2455                                                  2265 
2456 /**                                              2266 /**
2457  * folio_make_device_exclusive - Mark the fol    2267  * folio_make_device_exclusive - Mark the folio exclusively owned by a device.
2458  * @folio: The folio to replace page table en    2268  * @folio: The folio to replace page table entries for.
2459  * @mm: The mm_struct where the folio is expe    2269  * @mm: The mm_struct where the folio is expected to be mapped.
2460  * @address: Address where the folio is expec    2270  * @address: Address where the folio is expected to be mapped.
2461  * @owner: passed to MMU_NOTIFY_EXCLUSIVE ran    2271  * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks
2462  *                                               2272  *
2463  * Tries to remove all the page table entries    2273  * Tries to remove all the page table entries which are mapping this
2464  * folio and replace them with special device    2274  * folio and replace them with special device exclusive swap entries to
2465  * grant a device exclusive access to the fol    2275  * grant a device exclusive access to the folio.
2466  *                                               2276  *
2467  * Context: Caller must hold the folio lock.     2277  * Context: Caller must hold the folio lock.
2468  * Return: false if the page is still mapped,    2278  * Return: false if the page is still mapped, or if it could not be unmapped
2469  * from the expected address. Otherwise retur    2279  * from the expected address. Otherwise returns true (success).
2470  */                                              2280  */
2471 static bool folio_make_device_exclusive(struc    2281 static bool folio_make_device_exclusive(struct folio *folio,
2472                 struct mm_struct *mm, unsigne    2282                 struct mm_struct *mm, unsigned long address, void *owner)
2473 {                                                2283 {
2474         struct make_exclusive_args args = {      2284         struct make_exclusive_args args = {
2475                 .mm = mm,                        2285                 .mm = mm,
2476                 .address = address,              2286                 .address = address,
2477                 .owner = owner,                  2287                 .owner = owner,
2478                 .valid = false,                  2288                 .valid = false,
2479         };                                       2289         };
2480         struct rmap_walk_control rwc = {         2290         struct rmap_walk_control rwc = {
2481                 .rmap_one = page_make_device_    2291                 .rmap_one = page_make_device_exclusive_one,
2482                 .done = folio_not_mapped,        2292                 .done = folio_not_mapped,
2483                 .anon_lock = folio_lock_anon_    2293                 .anon_lock = folio_lock_anon_vma_read,
2484                 .arg = &args,                    2294                 .arg = &args,
2485         };                                       2295         };
2486                                                  2296 
2487         /*                                       2297         /*
2488          * Restrict to anonymous folios for n    2298          * Restrict to anonymous folios for now to avoid potential writeback
2489          * issues.                               2299          * issues.
2490          */                                      2300          */
2491         if (!folio_test_anon(folio))             2301         if (!folio_test_anon(folio))
2492                 return false;                    2302                 return false;
2493                                                  2303 
2494         rmap_walk(folio, &rwc);                  2304         rmap_walk(folio, &rwc);
2495                                                  2305 
2496         return args.valid && !folio_mapcount(    2306         return args.valid && !folio_mapcount(folio);
2497 }                                                2307 }
2498                                                  2308 
2499 /**                                              2309 /**
2500  * make_device_exclusive_range() - Mark a ran    2310  * make_device_exclusive_range() - Mark a range for exclusive use by a device
2501  * @mm: mm_struct of associated target proces    2311  * @mm: mm_struct of associated target process
2502  * @start: start of the region to mark for ex    2312  * @start: start of the region to mark for exclusive device access
2503  * @end: end address of region                   2313  * @end: end address of region
2504  * @pages: returns the pages which were succe    2314  * @pages: returns the pages which were successfully marked for exclusive access
2505  * @owner: passed to MMU_NOTIFY_EXCLUSIVE ran    2315  * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
2506  *                                               2316  *
2507  * Returns: number of pages found in the rang    2317  * Returns: number of pages found in the range by GUP. A page is marked for
2508  * exclusive access only if the page pointer     2318  * exclusive access only if the page pointer is non-NULL.
2509  *                                               2319  *
2510  * This function finds ptes mapping page(s) t    2320  * This function finds ptes mapping page(s) to the given address range, locks
2511  * them and replaces mappings with special sw    2321  * them and replaces mappings with special swap entries preventing userspace CPU
2512  * access. On fault these entries are replace    2322  * access. On fault these entries are replaced with the original mapping after
2513  * calling MMU notifiers.                        2323  * calling MMU notifiers.
2514  *                                               2324  *
2515  * A driver using this to program access from    2325  * A driver using this to program access from a device must use a mmu notifier
2516  * critical section to hold a device specific    2326  * critical section to hold a device specific lock during programming. Once
2517  * programming is complete it should drop the    2327  * programming is complete it should drop the page lock and reference after
2518  * which point CPU access to the page will re    2328  * which point CPU access to the page will revoke the exclusive access.
2519  */                                              2329  */
2520 int make_device_exclusive_range(struct mm_str    2330 int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
2521                                 unsigned long    2331                                 unsigned long end, struct page **pages,
2522                                 void *owner)     2332                                 void *owner)
2523 {                                                2333 {
2524         long npages = (end - start) >> PAGE_S    2334         long npages = (end - start) >> PAGE_SHIFT;
2525         long i;                                  2335         long i;
2526                                                  2336 
2527         npages = get_user_pages_remote(mm, st    2337         npages = get_user_pages_remote(mm, start, npages,
2528                                        FOLL_G    2338                                        FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
2529                                        pages, !! 2339                                        pages, NULL, NULL);
2530         if (npages < 0)                          2340         if (npages < 0)
2531                 return npages;                   2341                 return npages;
2532                                                  2342 
2533         for (i = 0; i < npages; i++, start +=    2343         for (i = 0; i < npages; i++, start += PAGE_SIZE) {
2534                 struct folio *folio = page_fo    2344                 struct folio *folio = page_folio(pages[i]);
2535                 if (PageTail(pages[i]) || !fo    2345                 if (PageTail(pages[i]) || !folio_trylock(folio)) {
2536                         folio_put(folio);        2346                         folio_put(folio);
2537                         pages[i] = NULL;         2347                         pages[i] = NULL;
2538                         continue;                2348                         continue;
2539                 }                                2349                 }
2540                                                  2350 
2541                 if (!folio_make_device_exclus    2351                 if (!folio_make_device_exclusive(folio, mm, start, owner)) {
2542                         folio_unlock(folio);     2352                         folio_unlock(folio);
2543                         folio_put(folio);        2353                         folio_put(folio);
2544                         pages[i] = NULL;         2354                         pages[i] = NULL;
2545                 }                                2355                 }
2546         }                                        2356         }
2547                                                  2357 
2548         return npages;                           2358         return npages;
2549 }                                                2359 }
2550 EXPORT_SYMBOL_GPL(make_device_exclusive_range    2360 EXPORT_SYMBOL_GPL(make_device_exclusive_range);
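
The kernel-doc for make_device_exclusive_range() above spells out the driver contract: program the device under a device-specific lock that the MMU_NOTIFY_EXCLUSIVE callback also takes, then drop the page lock and reference so a later CPU fault can revoke the exclusive access. A hedged sketch of that pattern follows (editor's illustration, not part of mm/rmap.c); struct my_dev, its exclusive_lock and the device-programming step are hypothetical, while make_device_exclusive_range(), unlock_page() and put_page() are the real interfaces.

#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>

struct my_dev {				/* hypothetical driver state */
	struct mutex exclusive_lock;	/* also taken by the driver's
					 * MMU_NOTIFY_EXCLUSIVE callback */
};

/* Editor's sketch, not part of mm/rmap.c. */
static int my_dev_grab_page(struct my_dev *dev, struct mm_struct *mm,
			    unsigned long addr)
{
	struct page *page = NULL;
	int npages;

	/* @owner (here: dev) lets the notifier callbacks filter this event. */
	npages = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
					     &page, dev);
	if (npages < 0)
		return npages;
	if (!page)
		return -EBUSY;		/* not marked exclusive; caller retries */

	mutex_lock(&dev->exclusive_lock);
	/* ... program the device's page tables with page_to_pfn(page) ... */
	mutex_unlock(&dev->exclusive_lock);

	/*
	 * Drop the page lock and reference; from now on a CPU fault on this
	 * address revokes the device's exclusive access.
	 */
	unlock_page(page);
	put_page(page);
	return 0;
}
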
2551 #endif                                           2361 #endif
2552                                                  2362 
2553 void __put_anon_vma(struct anon_vma *anon_vma    2363 void __put_anon_vma(struct anon_vma *anon_vma)
2554 {                                                2364 {
2555         struct anon_vma *root = anon_vma->roo    2365         struct anon_vma *root = anon_vma->root;
2556                                                  2366 
2557         anon_vma_free(anon_vma);                 2367         anon_vma_free(anon_vma);
2558         if (root != anon_vma && atomic_dec_an    2368         if (root != anon_vma && atomic_dec_and_test(&root->refcount))
2559                 anon_vma_free(root);             2369                 anon_vma_free(root);
2560 }                                                2370 }
2561                                                  2371 
2562 static struct anon_vma *rmap_walk_anon_lock(s    2372 static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
2563                                             s    2373                                             struct rmap_walk_control *rwc)
2564 {                                                2374 {
2565         struct anon_vma *anon_vma;               2375         struct anon_vma *anon_vma;
2566                                                  2376 
2567         if (rwc->anon_lock)                      2377         if (rwc->anon_lock)
2568                 return rwc->anon_lock(folio,     2378                 return rwc->anon_lock(folio, rwc);
2569                                                  2379 
2570         /*                                       2380         /*
2571          * Note: remove_migration_ptes() cann    2381          * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read()
2572          * because that depends on page_mappe    2382          * because that depends on page_mapped(); but not all its usages
2573          * are holding mmap_lock. Users witho    2383          * are holding mmap_lock. Users without mmap_lock are required to
2574          * take a reference count to prevent     2384          * take a reference count to prevent the anon_vma disappearing
2575          */                                      2385          */
2576         anon_vma = folio_anon_vma(folio);        2386         anon_vma = folio_anon_vma(folio);
2577         if (!anon_vma)                           2387         if (!anon_vma)
2578                 return NULL;                     2388                 return NULL;
2579                                                  2389 
2580         if (anon_vma_trylock_read(anon_vma))     2390         if (anon_vma_trylock_read(anon_vma))
2581                 goto out;                        2391                 goto out;
2582                                                  2392 
2583         if (rwc->try_lock) {                     2393         if (rwc->try_lock) {
2584                 anon_vma = NULL;                 2394                 anon_vma = NULL;
2585                 rwc->contended = true;           2395                 rwc->contended = true;
2586                 goto out;                        2396                 goto out;
2587         }                                        2397         }
2588                                                  2398 
2589         anon_vma_lock_read(anon_vma);            2399         anon_vma_lock_read(anon_vma);
2590 out:                                             2400 out:
2591         return anon_vma;                         2401         return anon_vma;
2592 }                                                2402 }
2593                                                  2403 
2594 /*                                               2404 /*
2595  * rmap_walk_anon - do something to anonymous    2405  * rmap_walk_anon - do something to anonymous page using the object-based
2596  * rmap method                                   2406  * rmap method
2597  * @folio: the folio to be handled            !! 2407  * @page: the page to be handled
2598  * @rwc: control variable according to each w    2408  * @rwc: control variable according to each walk type
2599  * @locked: caller holds relevant rmap lock   << 
2600  *                                               2409  *
2601  * Find all the mappings of a folio using the !! 2410  * Find all the mappings of a page using the mapping pointer and the vma chains
2602  * chains contained in the anon_vma struct it !! 2411  * contained in the anon_vma struct it points to.
2603  */                                              2412  */
2604 static void rmap_walk_anon(struct folio *foli    2413 static void rmap_walk_anon(struct folio *folio,
2605                 struct rmap_walk_control *rwc    2414                 struct rmap_walk_control *rwc, bool locked)
2606 {                                                2415 {
2607         struct anon_vma *anon_vma;               2416         struct anon_vma *anon_vma;
2608         pgoff_t pgoff_start, pgoff_end;          2417         pgoff_t pgoff_start, pgoff_end;
2609         struct anon_vma_chain *avc;              2418         struct anon_vma_chain *avc;
2610                                                  2419 
2611         if (locked) {                            2420         if (locked) {
2612                 anon_vma = folio_anon_vma(fol    2421                 anon_vma = folio_anon_vma(folio);
2613                 /* anon_vma disappear under u    2422                 /* anon_vma disappear under us? */
2614                 VM_BUG_ON_FOLIO(!anon_vma, fo    2423                 VM_BUG_ON_FOLIO(!anon_vma, folio);
2615         } else {                                 2424         } else {
2616                 anon_vma = rmap_walk_anon_loc    2425                 anon_vma = rmap_walk_anon_lock(folio, rwc);
2617         }                                        2426         }
2618         if (!anon_vma)                           2427         if (!anon_vma)
2619                 return;                          2428                 return;
2620                                                  2429 
2621         pgoff_start = folio_pgoff(folio);        2430         pgoff_start = folio_pgoff(folio);
2622         pgoff_end = pgoff_start + folio_nr_pa    2431         pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2623         anon_vma_interval_tree_foreach(avc, &    2432         anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
2624                         pgoff_start, pgoff_en    2433                         pgoff_start, pgoff_end) {
2625                 struct vm_area_struct *vma =     2434                 struct vm_area_struct *vma = avc->vma;
2626                 unsigned long address = vma_a !! 2435                 unsigned long address = vma_address(&folio->page, vma);
2627                                 folio_nr_page << 
2628                                                  2436 
2629                 VM_BUG_ON_VMA(address == -EFA    2437                 VM_BUG_ON_VMA(address == -EFAULT, vma);
2630                 cond_resched();                  2438                 cond_resched();
2631                                                  2439 
2632                 if (rwc->invalid_vma && rwc->    2440                 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2633                         continue;                2441                         continue;
2634                                                  2442 
2635                 if (!rwc->rmap_one(folio, vma    2443                 if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2636                         break;                   2444                         break;
2637                 if (rwc->done && rwc->done(fo    2445                 if (rwc->done && rwc->done(folio))
2638                         break;                   2446                         break;
2639         }                                        2447         }
2640                                                  2448 
2641         if (!locked)                             2449         if (!locked)
2642                 anon_vma_unlock_read(anon_vma    2450                 anon_vma_unlock_read(anon_vma);
2643 }                                                2451 }
2644                                                  2452 
2645 /*                                               2453 /*
2646  * rmap_walk_file - do something to file page    2454  * rmap_walk_file - do something to file page using the object-based rmap method
2647  * @folio: the folio to be handled            !! 2455  * @page: the page to be handled
2648  * @rwc: control variable according to each w    2456  * @rwc: control variable according to each walk type
2649  * @locked: caller holds relevant rmap lock   << 
2650  *                                               2457  *
2651  * Find all the mappings of a folio using the !! 2458  * Find all the mappings of a page using the mapping pointer and the vma chains
2652  * contained in the address_space struct it p    2459  * contained in the address_space struct it points to.
2653  */                                              2460  */
2654 static void rmap_walk_file(struct folio *foli    2461 static void rmap_walk_file(struct folio *folio,
2655                 struct rmap_walk_control *rwc    2462                 struct rmap_walk_control *rwc, bool locked)
2656 {                                                2463 {
2657         struct address_space *mapping = folio    2464         struct address_space *mapping = folio_mapping(folio);
2658         pgoff_t pgoff_start, pgoff_end;          2465         pgoff_t pgoff_start, pgoff_end;
2659         struct vm_area_struct *vma;              2466         struct vm_area_struct *vma;
2660                                                  2467 
2661         /*                                       2468         /*
2662          * The page lock not only makes sure     2469          * The page lock not only makes sure that page->mapping cannot
2663          * suddenly be NULLified by truncatio    2470          * suddenly be NULLified by truncation, it makes sure that the
2664          * structure at mapping cannot be fre    2471          * structure at mapping cannot be freed and reused yet,
2665          * so we can safely take mapping->i_m    2472          * so we can safely take mapping->i_mmap_rwsem.
2666          */                                      2473          */
2667         VM_BUG_ON_FOLIO(!folio_test_locked(fo    2474         VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2668                                                  2475 
2669         if (!mapping)                            2476         if (!mapping)
2670                 return;                          2477                 return;
2671                                                  2478 
2672         pgoff_start = folio_pgoff(folio);        2479         pgoff_start = folio_pgoff(folio);
2673         pgoff_end = pgoff_start + folio_nr_pa    2480         pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2674         if (!locked) {                           2481         if (!locked) {
2675                 if (i_mmap_trylock_read(mappi    2482                 if (i_mmap_trylock_read(mapping))
2676                         goto lookup;             2483                         goto lookup;
2677                                                  2484 
2678                 if (rwc->try_lock) {             2485                 if (rwc->try_lock) {
2679                         rwc->contended = true    2486                         rwc->contended = true;
2680                         return;                  2487                         return;
2681                 }                                2488                 }
2682                                                  2489 
2683                 i_mmap_lock_read(mapping);       2490                 i_mmap_lock_read(mapping);
2684         }                                        2491         }
2685 lookup:                                          2492 lookup:
2686         vma_interval_tree_foreach(vma, &mappi    2493         vma_interval_tree_foreach(vma, &mapping->i_mmap,
2687                         pgoff_start, pgoff_en    2494                         pgoff_start, pgoff_end) {
2688                 unsigned long address = vma_a !! 2495                 unsigned long address = vma_address(&folio->page, vma);
2689                                folio_nr_pages << 
2690                                                  2496 
2691                 VM_BUG_ON_VMA(address == -EFA    2497                 VM_BUG_ON_VMA(address == -EFAULT, vma);
2692                 cond_resched();                  2498                 cond_resched();
2693                                                  2499 
2694                 if (rwc->invalid_vma && rwc->    2500                 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2695                         continue;                2501                         continue;
2696                                                  2502 
2697                 if (!rwc->rmap_one(folio, vma    2503                 if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2698                         goto done;               2504                         goto done;
2699                 if (rwc->done && rwc->done(fo    2505                 if (rwc->done && rwc->done(folio))
2700                         goto done;               2506                         goto done;
2701         }                                        2507         }
2702                                                  2508 
2703 done:                                            2509 done:
2704         if (!locked)                             2510         if (!locked)
2705                 i_mmap_unlock_read(mapping);     2511                 i_mmap_unlock_read(mapping);
2706 }                                                2512 }
2707                                                  2513 
2708 void rmap_walk(struct folio *folio, struct rm    2514 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
2709 {                                                2515 {
2710         if (unlikely(folio_test_ksm(folio)))     2516         if (unlikely(folio_test_ksm(folio)))
2711                 rmap_walk_ksm(folio, rwc);       2517                 rmap_walk_ksm(folio, rwc);
2712         else if (folio_test_anon(folio))         2518         else if (folio_test_anon(folio))
2713                 rmap_walk_anon(folio, rwc, fa    2519                 rmap_walk_anon(folio, rwc, false);
2714         else                                     2520         else
2715                 rmap_walk_file(folio, rwc, fa    2521                 rmap_walk_file(folio, rwc, false);
2716 }                                                2522 }
2717                                                  2523 
2718 /* Like rmap_walk, but caller holds relevant     2524 /* Like rmap_walk, but caller holds relevant rmap lock */
2719 void rmap_walk_locked(struct folio *folio, st    2525 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
2720 {                                                2526 {
2721         /* no ksm support for now */             2527         /* no ksm support for now */
2722         VM_BUG_ON_FOLIO(folio_test_ksm(folio)    2528         VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
2723         if (folio_test_anon(folio))              2529         if (folio_test_anon(folio))
2724                 rmap_walk_anon(folio, rwc, tr    2530                 rmap_walk_anon(folio, rwc, true);
2725         else                                     2531         else
2726                 rmap_walk_file(folio, rwc, tr    2532                 rmap_walk_file(folio, rwc, true);
2727 }                                                2533 }
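
rmap_walk() and rmap_walk_locked() are driven entirely by the rmap_walk_control the caller passes in, as try_to_migrate() earlier in this file demonstrates. As a hedged, minimal illustration (editor's sketch, not part of mm/rmap.c), a custom walk that merely counts the VMAs currently mapping a locked folio could look like this; count_one() and count_folio_vmas() are hypothetical names.

#include <linux/mmdebug.h>
#include <linux/page-flags.h>
#include <linux/rmap.h>

/* Editor's sketch, not part of mm/rmap.c. */
static bool count_one(struct folio *folio, struct vm_area_struct *vma,
		      unsigned long address, void *arg)
{
	unsigned long *nr = arg;

	(*nr)++;
	return true;			/* keep walking */
}

static unsigned long count_folio_vmas(struct folio *folio)
{
	unsigned long nr = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = count_one,
		.arg = &nr,
	};

	/* Like every rmap walk, this relies on the caller holding the folio lock. */
	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	rmap_walk(folio, &rwc);
	return nr;
}
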
2728                                                  2534 
2729 #ifdef CONFIG_HUGETLB_PAGE                       2535 #ifdef CONFIG_HUGETLB_PAGE
2730 /*                                               2536 /*
2731  * The following two functions are for anonym    2537  * The following two functions are for anonymous (private mapped) hugepages.
2732  * Unlike common anonymous pages, anonymous h    2538  * Unlike common anonymous pages, anonymous hugepages have no accounting code
2733  * and no lru code, because we handle hugepag    2539  * and no lru code, because we handle hugepages differently from common pages.
                                                   >> 2540  *
                                                   >> 2541  * RMAP_COMPOUND is ignored.
2734  */                                              2542  */
2735 void hugetlb_add_anon_rmap(struct folio *foli !! 2543 void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
2736                 unsigned long address, rmap_t !! 2544                             unsigned long address, rmap_t flags)
2737 {                                                2545 {
2738         VM_WARN_ON_FOLIO(!folio_test_hugetlb( !! 2546         struct anon_vma *anon_vma = vma->anon_vma;
2739         VM_WARN_ON_FOLIO(!folio_test_anon(fol !! 2547         int first;
2740                                                  2548 
2741         atomic_inc(&folio->_entire_mapcount); !! 2549         BUG_ON(!PageLocked(page));
2742         atomic_inc(&folio->_large_mapcount);  !! 2550         BUG_ON(!anon_vma);
2743         if (flags & RMAP_EXCLUSIVE)           !! 2551         /* address might be in next vma when migration races vma_adjust */
2744                 SetPageAnonExclusive(&folio-> !! 2552         first = atomic_inc_and_test(compound_mapcount_ptr(page));
2745         VM_WARN_ON_FOLIO(folio_entire_mapcoun !! 2553         VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
2746                          PageAnonExclusive(&f !! 2554         VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
                                                   >> 2555         if (first)
                                                   >> 2556                 __page_set_anon_rmap(page, vma, address,
                                                   >> 2557                                      !!(flags & RMAP_EXCLUSIVE));
2747 }                                                2558 }
2748                                                  2559 
2749 void hugetlb_add_new_anon_rmap(struct folio * !! 2560 void hugepage_add_new_anon_rmap(struct page *page,
2750                 struct vm_area_struct *vma, u !! 2561                         struct vm_area_struct *vma, unsigned long address)
2751 {                                                2562 {
2752         VM_WARN_ON_FOLIO(!folio_test_hugetlb( << 
2753                                               << 
2754         BUG_ON(address < vma->vm_start || add    2563         BUG_ON(address < vma->vm_start || address >= vma->vm_end);
2755         /* increment count (starts at -1) */     2564         /* increment count (starts at -1) */
2756         atomic_set(&folio->_entire_mapcount,  !! 2565         atomic_set(compound_mapcount_ptr(page), 0);
2757         atomic_set(&folio->_large_mapcount, 0 !! 2566         ClearHPageRestoreReserve(page);
2758         folio_clear_hugetlb_restore_reserve(f !! 2567         __page_set_anon_rmap(page, vma, address, 1);
2759         __folio_set_anon(folio, vma, address, << 
2760         SetPageAnonExclusive(&folio->page);   << 
2761 }                                                2568 }
2762 #endif /* CONFIG_HUGETLB_PAGE */                 2569 #endif /* CONFIG_HUGETLB_PAGE */
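
The two hugetlb helpers above cover the "first mapping" and "additional mapping" cases for anonymous hugetlb folios. As a hedged illustration of the first case (editor's sketch, not part of mm/rmap.c), a fault-style caller would set up the rmap before installing the huge PTE itself; map_new_anon_hugetlb() is a hypothetical name, and the page table lock and PTE installation are assumed to be handled by the caller.

#include <linux/pagemap.h>
#include <linux/rmap.h>

/* Editor's sketch, not part of mm/rmap.c. */
static void map_new_anon_hugetlb(struct folio *folio,
				 struct vm_area_struct *vma,
				 unsigned long haddr)
{
	/* The folio is freshly allocated, locked and not mapped anywhere yet. */
	__folio_mark_uptodate(folio);
	hugetlb_add_new_anon_rmap(folio, vma, haddr);
	/* ... the caller now builds the huge PTE and set_huge_pte_at()s it ... */
}
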
2763                                                  2570 
