
TOMOYO Linux Cross Reference
Linux/fs/dax.c


Diff markup

Differences between /fs/dax.c (Version linux-6.12-rc7) and /fs/dax.c (Version linux-6.2.16)


  1 // SPDX-License-Identifier: GPL-2.0-only            1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*                                                  2 /*
  3  * fs/dax.c - Direct Access filesystem code         3  * fs/dax.c - Direct Access filesystem code
  4  * Copyright (c) 2013-2014 Intel Corporation        4  * Copyright (c) 2013-2014 Intel Corporation
  5  * Author: Matthew Wilcox <matthew.r.wilcox@in      5  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
  6  * Author: Ross Zwisler <ross.zwisler@linux.in      6  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
  7  */                                                 7  */
  8                                                     8 
  9 #include <linux/atomic.h>                           9 #include <linux/atomic.h>
 10 #include <linux/blkdev.h>                          10 #include <linux/blkdev.h>
 11 #include <linux/buffer_head.h>                     11 #include <linux/buffer_head.h>
 12 #include <linux/dax.h>                             12 #include <linux/dax.h>
 13 #include <linux/fs.h>                              13 #include <linux/fs.h>
 14 #include <linux/highmem.h>                         14 #include <linux/highmem.h>
 15 #include <linux/memcontrol.h>                      15 #include <linux/memcontrol.h>
 16 #include <linux/mm.h>                              16 #include <linux/mm.h>
 17 #include <linux/mutex.h>                           17 #include <linux/mutex.h>
 18 #include <linux/pagevec.h>                         18 #include <linux/pagevec.h>
 19 #include <linux/sched.h>                           19 #include <linux/sched.h>
 20 #include <linux/sched/signal.h>                    20 #include <linux/sched/signal.h>
 21 #include <linux/uio.h>                             21 #include <linux/uio.h>
 22 #include <linux/vmstat.h>                          22 #include <linux/vmstat.h>
 23 #include <linux/pfn_t.h>                           23 #include <linux/pfn_t.h>
 24 #include <linux/sizes.h>                           24 #include <linux/sizes.h>
 25 #include <linux/mmu_notifier.h>                    25 #include <linux/mmu_notifier.h>
 26 #include <linux/iomap.h>                           26 #include <linux/iomap.h>
 27 #include <linux/rmap.h>                            27 #include <linux/rmap.h>
 28 #include <asm/pgalloc.h>                           28 #include <asm/pgalloc.h>
 29                                                    29 
 30 #define CREATE_TRACE_POINTS                        30 #define CREATE_TRACE_POINTS
 31 #include <trace/events/fs_dax.h>                   31 #include <trace/events/fs_dax.h>
 32                                                    32 
                                                   >>  33 static inline unsigned int pe_order(enum page_entry_size pe_size)
                                                   >>  34 {
                                                   >>  35         if (pe_size == PE_SIZE_PTE)
                                                   >>  36                 return PAGE_SHIFT - PAGE_SHIFT;
                                                   >>  37         if (pe_size == PE_SIZE_PMD)
                                                   >>  38                 return PMD_SHIFT - PAGE_SHIFT;
                                                   >>  39         if (pe_size == PE_SIZE_PUD)
                                                   >>  40                 return PUD_SHIFT - PAGE_SHIFT;
                                                   >>  41         return ~0;
                                                   >>  42 }
                                                   >>  43 
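/*
 * Illustrative note, not part of either tree shown here: pe_order(),
 * present only in the right-hand (linux-6.2.16) column above, converts an
 * enum page_entry_size into a page order.  Assuming 4 KiB pages with
 * 2 MiB PMDs and 1 GiB PUDs, PE_SIZE_PTE maps to order 0, PE_SIZE_PMD to
 * PMD_SHIFT - PAGE_SHIFT = 21 - 12 = 9 (512 base pages), and PE_SIZE_PUD
 * to PUD_SHIFT - PAGE_SHIFT = 30 - 12 = 18.
 */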
 33 /* We choose 4096 entries - same as per-zone p     44 /* We choose 4096 entries - same as per-zone page wait tables */
 34 #define DAX_WAIT_TABLE_BITS 12                     45 #define DAX_WAIT_TABLE_BITS 12
 35 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_     46 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
 36                                                    47 
 37 /* The 'colour' (ie low bits) within a PMD of      48 /* The 'colour' (ie low bits) within a PMD of a page offset.  */
 38 #define PG_PMD_COLOUR   ((PMD_SIZE >> PAGE_SHI     49 #define PG_PMD_COLOUR   ((PMD_SIZE >> PAGE_SHIFT) - 1)
 39 #define PG_PMD_NR       (PMD_SIZE >> PAGE_SHIF     50 #define PG_PMD_NR       (PMD_SIZE >> PAGE_SHIFT)
 40                                                    51 
                                                   >>  52 /* The order of a PMD entry */
                                                   >>  53 #define PMD_ORDER       (PMD_SHIFT - PAGE_SHIFT)
                                                   >>  54 
 41 static wait_queue_head_t wait_table[DAX_WAIT_T     55 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
 42                                                    56 
 43 static int __init init_dax_wait_table(void)        57 static int __init init_dax_wait_table(void)
 44 {                                                  58 {
 45         int i;                                     59         int i;
 46                                                    60 
 47         for (i = 0; i < DAX_WAIT_TABLE_ENTRIES     61         for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
 48                 init_waitqueue_head(wait_table     62                 init_waitqueue_head(wait_table + i);
 49         return 0;                                  63         return 0;
 50 }                                                  64 }
 51 fs_initcall(init_dax_wait_table);                  65 fs_initcall(init_dax_wait_table);
 52                                                    66 
 53 /*                                                 67 /*
 54  * DAX pagecache entries use XArray value entr     68  * DAX pagecache entries use XArray value entries so they can't be mistaken
 55  * for pages.  We use one bit for locking, one     69  * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 56  * and two more to tell us if the entry is a z     70  * and two more to tell us if the entry is a zero page or an empty entry that
 57  * is just used for locking.  In total four sp     71  * is just used for locking.  In total four special bits.
 58  *                                                 72  *
 59  * If the PMD bit isn't set the entry has size     73  * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 60  * and EMPTY bits aren't set the entry is a no     74  * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 61  * block allocation.                               75  * block allocation.
 62  */                                                76  */
 63 #define DAX_SHIFT       (4)                        77 #define DAX_SHIFT       (4)
 64 #define DAX_LOCKED      (1UL << 0)                 78 #define DAX_LOCKED      (1UL << 0)
 65 #define DAX_PMD         (1UL << 1)                 79 #define DAX_PMD         (1UL << 1)
 66 #define DAX_ZERO_PAGE   (1UL << 2)                 80 #define DAX_ZERO_PAGE   (1UL << 2)
 67 #define DAX_EMPTY       (1UL << 3)                 81 #define DAX_EMPTY       (1UL << 3)
 68                                                    82 
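/*
 * Illustrative sketch of the encoding described above, not taken from
 * fs/dax.c: a DAX entry is an XArray value whose low DAX_SHIFT bits hold
 * the flag bits and whose upper bits hold the pfn.  A locked PMD entry
 * for pfn 0x1200, for example, would be built roughly as
 *
 *      xa_mk_value(DAX_LOCKED | DAX_PMD | (0x1200UL << DAX_SHIFT))
 *
 * and dax_to_pfn() recovers 0x1200 by shifting the value right by
 * DAX_SHIFT, which discards the flag bits.
 */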
 69 static unsigned long dax_to_pfn(void *entry)       83 static unsigned long dax_to_pfn(void *entry)
 70 {                                                  84 {
 71         return xa_to_value(entry) >> DAX_SHIFT     85         return xa_to_value(entry) >> DAX_SHIFT;
 72 }                                                  86 }
 73                                                    87 
 74 static void *dax_make_entry(pfn_t pfn, unsigne     88 static void *dax_make_entry(pfn_t pfn, unsigned long flags)
 75 {                                                  89 {
 76         return xa_mk_value(flags | (pfn_t_to_p     90         return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
 77 }                                                  91 }
 78                                                    92 
 79 static bool dax_is_locked(void *entry)             93 static bool dax_is_locked(void *entry)
 80 {                                                  94 {
 81         return xa_to_value(entry) & DAX_LOCKED     95         return xa_to_value(entry) & DAX_LOCKED;
 82 }                                                  96 }
 83                                                    97 
 84 static unsigned int dax_entry_order(void *entr     98 static unsigned int dax_entry_order(void *entry)
 85 {                                                  99 {
 86         if (xa_to_value(entry) & DAX_PMD)         100         if (xa_to_value(entry) & DAX_PMD)
 87                 return PMD_ORDER;                 101                 return PMD_ORDER;
 88         return 0;                                 102         return 0;
 89 }                                                 103 }
 90                                                   104 
 91 static unsigned long dax_is_pmd_entry(void *en    105 static unsigned long dax_is_pmd_entry(void *entry)
 92 {                                                 106 {
 93         return xa_to_value(entry) & DAX_PMD;      107         return xa_to_value(entry) & DAX_PMD;
 94 }                                                 108 }
 95                                                   109 
 96 static bool dax_is_pte_entry(void *entry)         110 static bool dax_is_pte_entry(void *entry)
 97 {                                                 111 {
 98         return !(xa_to_value(entry) & DAX_PMD)    112         return !(xa_to_value(entry) & DAX_PMD);
 99 }                                                 113 }
100                                                   114 
101 static int dax_is_zero_entry(void *entry)         115 static int dax_is_zero_entry(void *entry)
102 {                                                 116 {
103         return xa_to_value(entry) & DAX_ZERO_P    117         return xa_to_value(entry) & DAX_ZERO_PAGE;
104 }                                                 118 }
105                                                   119 
106 static int dax_is_empty_entry(void *entry)        120 static int dax_is_empty_entry(void *entry)
107 {                                                 121 {
108         return xa_to_value(entry) & DAX_EMPTY;    122         return xa_to_value(entry) & DAX_EMPTY;
109 }                                                 123 }
110                                                   124 
111 /*                                                125 /*
112  * true if the entry that was found is of a sm    126  * true if the entry that was found is of a smaller order than the entry
113  * we were looking for                            127  * we were looking for
114  */                                               128  */
115 static bool dax_is_conflict(void *entry)          129 static bool dax_is_conflict(void *entry)
116 {                                                 130 {
117         return entry == XA_RETRY_ENTRY;           131         return entry == XA_RETRY_ENTRY;
118 }                                                 132 }
119                                                   133 
120 /*                                                134 /*
121  * DAX page cache entry locking                   135  * DAX page cache entry locking
122  */                                               136  */
123 struct exceptional_entry_key {                    137 struct exceptional_entry_key {
124         struct xarray *xa;                        138         struct xarray *xa;
125         pgoff_t entry_start;                      139         pgoff_t entry_start;
126 };                                                140 };
127                                                   141 
128 struct wait_exceptional_entry_queue {             142 struct wait_exceptional_entry_queue {
129         wait_queue_entry_t wait;                  143         wait_queue_entry_t wait;
130         struct exceptional_entry_key key;         144         struct exceptional_entry_key key;
131 };                                                145 };
132                                                   146 
133 /**                                               147 /**
134  * enum dax_wake_mode: waitqueue wakeup behavi    148  * enum dax_wake_mode: waitqueue wakeup behaviour
135  * @WAKE_ALL: wake all waiters in the waitqueu    149  * @WAKE_ALL: wake all waiters in the waitqueue
136  * @WAKE_NEXT: wake only the first waiter in t    150  * @WAKE_NEXT: wake only the first waiter in the waitqueue
137  */                                               151  */
138 enum dax_wake_mode {                              152 enum dax_wake_mode {
139         WAKE_ALL,                                 153         WAKE_ALL,
140         WAKE_NEXT,                                154         WAKE_NEXT,
141 };                                                155 };
142                                                   156 
143 static wait_queue_head_t *dax_entry_waitqueue(    157 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
144                 void *entry, struct exceptiona    158                 void *entry, struct exceptional_entry_key *key)
145 {                                                 159 {
146         unsigned long hash;                       160         unsigned long hash;
147         unsigned long index = xas->xa_index;      161         unsigned long index = xas->xa_index;
148                                                   162 
149         /*                                        163         /*
150          * If 'entry' is a PMD, align the 'ind    164          * If 'entry' is a PMD, align the 'index' that we use for the wait
151          * queue to the start of that PMD.  Th    165          * queue to the start of that PMD.  This ensures that all offsets in
152          * the range covered by the PMD map to    166          * the range covered by the PMD map to the same bit lock.
153          */                                       167          */
154         if (dax_is_pmd_entry(entry))              168         if (dax_is_pmd_entry(entry))
155                 index &= ~PG_PMD_COLOUR;          169                 index &= ~PG_PMD_COLOUR;
156         key->xa = xas->xa;                        170         key->xa = xas->xa;
157         key->entry_start = index;                 171         key->entry_start = index;
158                                                   172 
159         hash = hash_long((unsigned long)xas->x    173         hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
160         return wait_table + hash;                 174         return wait_table + hash;
161 }                                                 175 }
162                                                   176 
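/*
 * Worked example for the waitqueue selection above (the numbers are
 * illustrative and assume 4 KiB pages with 2 MiB PMDs): PG_PMD_COLOUR is
 * then 511, so a fault anywhere in the PMD covering page indices
 * 0x200..0x3ff hashes with index 0x200.  Every waiter on that PMD entry
 * therefore lands on the same one of the 4096 (DAX_WAIT_TABLE_ENTRIES)
 * waitqueues, which is what lets dax_wake_entry() find them later.
 */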
163 static int wake_exceptional_entry_func(wait_qu    177 static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
164                 unsigned int mode, int sync, v    178                 unsigned int mode, int sync, void *keyp)
165 {                                                 179 {
166         struct exceptional_entry_key *key = ke    180         struct exceptional_entry_key *key = keyp;
167         struct wait_exceptional_entry_queue *e    181         struct wait_exceptional_entry_queue *ewait =
168                 container_of(wait, struct wait    182                 container_of(wait, struct wait_exceptional_entry_queue, wait);
169                                                   183 
170         if (key->xa != ewait->key.xa ||           184         if (key->xa != ewait->key.xa ||
171             key->entry_start != ewait->key.ent    185             key->entry_start != ewait->key.entry_start)
172                 return 0;                         186                 return 0;
173         return autoremove_wake_function(wait,     187         return autoremove_wake_function(wait, mode, sync, NULL);
174 }                                                 188 }
175                                                   189 
176 /*                                                190 /*
177  * @entry may no longer be the entry at the in    191  * @entry may no longer be the entry at the index in the mapping.
178  * The important information it's conveying is    192  * The important information it's conveying is whether the entry at
179  * this index used to be a PMD entry.             193  * this index used to be a PMD entry.
180  */                                               194  */
181 static void dax_wake_entry(struct xa_state *xa    195 static void dax_wake_entry(struct xa_state *xas, void *entry,
182                            enum dax_wake_mode     196                            enum dax_wake_mode mode)
183 {                                                 197 {
184         struct exceptional_entry_key key;         198         struct exceptional_entry_key key;
185         wait_queue_head_t *wq;                    199         wait_queue_head_t *wq;
186                                                   200 
187         wq = dax_entry_waitqueue(xas, entry, &    201         wq = dax_entry_waitqueue(xas, entry, &key);
188                                                   202 
189         /*                                        203         /*
190          * Checking for locked entry and prepa    204          * Checking for locked entry and prepare_to_wait_exclusive() happens
191          * under the i_pages lock, ditto for e    205          * under the i_pages lock, ditto for entry handling in our callers.
192          * So at this point all tasks that cou    206          * So at this point all tasks that could have seen our entry locked
193          * must be in the waitqueue and the fo    207          * must be in the waitqueue and the following check will see them.
194          */                                       208          */
195         if (waitqueue_active(wq))                 209         if (waitqueue_active(wq))
196                 __wake_up(wq, TASK_NORMAL, mod    210                 __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
197 }                                                 211 }
198                                                   212 
199 /*                                                213 /*
200  * Look up entry in page cache, wait for it to    214  * Look up entry in page cache, wait for it to become unlocked if it
201  * is a DAX entry and return it.  The caller m    215  * is a DAX entry and return it.  The caller must subsequently call
202  * put_unlocked_entry() if it did not lock the    216  * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
203  * if it did.  The entry returned may have a l    217  * if it did.  The entry returned may have a larger order than @order.
204  * If @order is larger than the order of the e    218  * If @order is larger than the order of the entry found in i_pages, this
205  * function returns a dax_is_conflict entry.      219  * function returns a dax_is_conflict entry.
206  *                                                220  *
207  * Must be called with the i_pages lock held.     221  * Must be called with the i_pages lock held.
208  */                                               222  */
209 static void *get_unlocked_entry(struct xa_stat    223 static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
210 {                                                 224 {
211         void *entry;                              225         void *entry;
212         struct wait_exceptional_entry_queue ew    226         struct wait_exceptional_entry_queue ewait;
213         wait_queue_head_t *wq;                    227         wait_queue_head_t *wq;
214                                                   228 
215         init_wait(&ewait.wait);                   229         init_wait(&ewait.wait);
216         ewait.wait.func = wake_exceptional_ent    230         ewait.wait.func = wake_exceptional_entry_func;
217                                                   231 
218         for (;;) {                                232         for (;;) {
219                 entry = xas_find_conflict(xas)    233                 entry = xas_find_conflict(xas);
220                 if (!entry || WARN_ON_ONCE(!xa    234                 if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
221                         return entry;             235                         return entry;
222                 if (dax_entry_order(entry) < o    236                 if (dax_entry_order(entry) < order)
223                         return XA_RETRY_ENTRY;    237                         return XA_RETRY_ENTRY;
224                 if (!dax_is_locked(entry))        238                 if (!dax_is_locked(entry))
225                         return entry;             239                         return entry;
226                                                   240 
227                 wq = dax_entry_waitqueue(xas,     241                 wq = dax_entry_waitqueue(xas, entry, &ewait.key);
228                 prepare_to_wait_exclusive(wq,     242                 prepare_to_wait_exclusive(wq, &ewait.wait,
229                                           TASK    243                                           TASK_UNINTERRUPTIBLE);
230                 xas_unlock_irq(xas);              244                 xas_unlock_irq(xas);
231                 xas_reset(xas);                   245                 xas_reset(xas);
232                 schedule();                       246                 schedule();
233                 finish_wait(wq, &ewait.wait);     247                 finish_wait(wq, &ewait.wait);
234                 xas_lock_irq(xas);                248                 xas_lock_irq(xas);
235         }                                         249         }
236 }                                                 250 }
237                                                   251 
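/*
 * Condensed sketch of the calling convention for get_unlocked_entry(),
 * modelled loosely on grab_mapping_entry() further down.  Illustrative
 * only, not a verbatim caller:
 *
 *      xas_lock_irq(&xas);
 *      entry = get_unlocked_entry(&xas, order);
 *      if (entry && !dax_is_conflict(entry)) {
 *              dax_lock_entry(&xas, entry);
 *              xas_unlock_irq(&xas);
 *              ...work on the locked entry...
 *              dax_unlock_entry(&xas, entry);
 *      } else {
 *              put_unlocked_entry(&xas, entry, WAKE_NEXT);
 *              xas_unlock_irq(&xas);
 *      }
 */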
238 /*                                                252 /*
239  * The only thing keeping the address space ar    253  * The only thing keeping the address space around is the i_pages lock
240  * (it's cycled in clear_inode() after removin    254  * (it's cycled in clear_inode() after removing the entries from i_pages)
241  * After we call xas_unlock_irq(), we cannot t    255  * After we call xas_unlock_irq(), we cannot touch xas->xa.
242  */                                               256  */
243 static void wait_entry_unlocked(struct xa_stat    257 static void wait_entry_unlocked(struct xa_state *xas, void *entry)
244 {                                                 258 {
245         struct wait_exceptional_entry_queue ew    259         struct wait_exceptional_entry_queue ewait;
246         wait_queue_head_t *wq;                    260         wait_queue_head_t *wq;
247                                                   261 
248         init_wait(&ewait.wait);                   262         init_wait(&ewait.wait);
249         ewait.wait.func = wake_exceptional_ent    263         ewait.wait.func = wake_exceptional_entry_func;
250                                                   264 
251         wq = dax_entry_waitqueue(xas, entry, &    265         wq = dax_entry_waitqueue(xas, entry, &ewait.key);
252         /*                                        266         /*
253          * Unlike get_unlocked_entry() there i    267          * Unlike get_unlocked_entry() there is no guarantee that this
254          * path ever successfully retrieves an    268          * path ever successfully retrieves an unlocked entry before an
255          * inode dies. Perform a non-exclusive    269          * inode dies. Perform a non-exclusive wait in case this path
256          * never successfully performs its own    270          * never successfully performs its own wake up.
257          */                                       271          */
258         prepare_to_wait(wq, &ewait.wait, TASK_    272         prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
259         xas_unlock_irq(xas);                      273         xas_unlock_irq(xas);
260         schedule();                               274         schedule();
261         finish_wait(wq, &ewait.wait);             275         finish_wait(wq, &ewait.wait);
262 }                                                 276 }
263                                                   277 
264 static void put_unlocked_entry(struct xa_state    278 static void put_unlocked_entry(struct xa_state *xas, void *entry,
265                                enum dax_wake_m    279                                enum dax_wake_mode mode)
266 {                                                 280 {
267         if (entry && !dax_is_conflict(entry))     281         if (entry && !dax_is_conflict(entry))
268                 dax_wake_entry(xas, entry, mod    282                 dax_wake_entry(xas, entry, mode);
269 }                                                 283 }
270                                                   284 
271 /*                                                285 /*
272  * We used the xa_state to get the entry, but     286  * We used the xa_state to get the entry, but then we locked the entry and
273  * dropped the xa_lock, so we know the xa_stat    287  * dropped the xa_lock, so we know the xa_state is stale and must be reset
274  * before use.                                    288  * before use.
275  */                                               289  */
276 static void dax_unlock_entry(struct xa_state *    290 static void dax_unlock_entry(struct xa_state *xas, void *entry)
277 {                                                 291 {
278         void *old;                                292         void *old;
279                                                   293 
280         BUG_ON(dax_is_locked(entry));             294         BUG_ON(dax_is_locked(entry));
281         xas_reset(xas);                           295         xas_reset(xas);
282         xas_lock_irq(xas);                        296         xas_lock_irq(xas);
283         old = xas_store(xas, entry);              297         old = xas_store(xas, entry);
284         xas_unlock_irq(xas);                      298         xas_unlock_irq(xas);
285         BUG_ON(!dax_is_locked(old));              299         BUG_ON(!dax_is_locked(old));
286         dax_wake_entry(xas, entry, WAKE_NEXT);    300         dax_wake_entry(xas, entry, WAKE_NEXT);
287 }                                                 301 }
288                                                   302 
289 /*                                                303 /*
290  * Return: The entry stored at this location b    304  * Return: The entry stored at this location before it was locked.
291  */                                               305  */
292 static void *dax_lock_entry(struct xa_state *x    306 static void *dax_lock_entry(struct xa_state *xas, void *entry)
293 {                                                 307 {
294         unsigned long v = xa_to_value(entry);     308         unsigned long v = xa_to_value(entry);
295         return xas_store(xas, xa_mk_value(v |     309         return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
296 }                                                 310 }
297                                                   311 
298 static unsigned long dax_entry_size(void *entr    312 static unsigned long dax_entry_size(void *entry)
299 {                                                 313 {
300         if (dax_is_zero_entry(entry))             314         if (dax_is_zero_entry(entry))
301                 return 0;                         315                 return 0;
302         else if (dax_is_empty_entry(entry))       316         else if (dax_is_empty_entry(entry))
303                 return 0;                         317                 return 0;
304         else if (dax_is_pmd_entry(entry))         318         else if (dax_is_pmd_entry(entry))
305                 return PMD_SIZE;                  319                 return PMD_SIZE;
306         else                                      320         else
307                 return PAGE_SIZE;                 321                 return PAGE_SIZE;
308 }                                                 322 }
309                                                   323 
310 static unsigned long dax_end_pfn(void *entry)     324 static unsigned long dax_end_pfn(void *entry)
311 {                                                 325 {
312         return dax_to_pfn(entry) + dax_entry_s    326         return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
313 }                                                 327 }
314                                                   328 
315 /*                                                329 /*
316  * Iterate through all mapped pfns represented    330  * Iterate through all mapped pfns represented by an entry, i.e. skip
317  * 'empty' and 'zero' entries.                    331  * 'empty' and 'zero' entries.
318  */                                               332  */
319 #define for_each_mapped_pfn(entry, pfn) \         333 #define for_each_mapped_pfn(entry, pfn) \
320         for (pfn = dax_to_pfn(entry); \           334         for (pfn = dax_to_pfn(entry); \
321                         pfn < dax_end_pfn(entr    335                         pfn < dax_end_pfn(entry); pfn++)
322                                                   336 
323 static inline bool dax_page_is_shared(struct p    337 static inline bool dax_page_is_shared(struct page *page)
324 {                                                 338 {
325         return page->mapping == PAGE_MAPPING_D    339         return page->mapping == PAGE_MAPPING_DAX_SHARED;
326 }                                                 340 }
327                                                   341 
328 /*                                                342 /*
329  * Set the page->mapping with PAGE_MAPPING_DAX    343  * Set the page->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the
330  * refcount.                                      344  * refcount.
331  */                                               345  */
332 static inline void dax_page_share_get(struct p    346 static inline void dax_page_share_get(struct page *page)
333 {                                                 347 {
334         if (page->mapping != PAGE_MAPPING_DAX_    348         if (page->mapping != PAGE_MAPPING_DAX_SHARED) {
335                 /*                                349                 /*
336                  * Reset the index if the page    350                  * Reset the index if the page was already mapped
337                  * regularly before.              351                  * regularly before.
338                  */                               352                  */
339                 if (page->mapping)                353                 if (page->mapping)
340                         page->share = 1;          354                         page->share = 1;
341                 page->mapping = PAGE_MAPPING_D    355                 page->mapping = PAGE_MAPPING_DAX_SHARED;
342         }                                         356         }
343         page->share++;                            357         page->share++;
344 }                                                 358 }
345                                                   359 
346 static inline unsigned long dax_page_share_put    360 static inline unsigned long dax_page_share_put(struct page *page)
347 {                                                 361 {
348         return --page->share;                     362         return --page->share;
349 }                                                 363 }
350                                                   364 
351 /*                                                365 /*
352  * When it is called in dax_insert_entry(), th    366  * When it is called in dax_insert_entry(), the shared flag will indicate that
353  * whether this entry is shared by multiple fi    367  * whether this entry is shared by multiple files.  If so, set the page->mapping
354  * PAGE_MAPPING_DAX_SHARED, and use page->shar    368  * PAGE_MAPPING_DAX_SHARED, and use page->share as refcount.
355  */                                               369  */
356 static void dax_associate_entry(void *entry, s    370 static void dax_associate_entry(void *entry, struct address_space *mapping,
357                 struct vm_area_struct *vma, un    371                 struct vm_area_struct *vma, unsigned long address, bool shared)
358 {                                                 372 {
359         unsigned long size = dax_entry_size(en    373         unsigned long size = dax_entry_size(entry), pfn, index;
360         int i = 0;                                374         int i = 0;
361                                                   375 
362         if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))    376         if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
363                 return;                           377                 return;
364                                                   378 
365         index = linear_page_index(vma, address    379         index = linear_page_index(vma, address & ~(size - 1));
366         for_each_mapped_pfn(entry, pfn) {         380         for_each_mapped_pfn(entry, pfn) {
367                 struct page *page = pfn_to_pag    381                 struct page *page = pfn_to_page(pfn);
368                                                   382 
369                 if (shared) {                     383                 if (shared) {
370                         dax_page_share_get(pag    384                         dax_page_share_get(page);
371                 } else {                          385                 } else {
372                         WARN_ON_ONCE(page->map    386                         WARN_ON_ONCE(page->mapping);
373                         page->mapping = mappin    387                         page->mapping = mapping;
374                         page->index = index +     388                         page->index = index + i++;
375                 }                                 389                 }
376         }                                         390         }
377 }                                                 391 }
378                                                   392 
379 static void dax_disassociate_entry(void *entry    393 static void dax_disassociate_entry(void *entry, struct address_space *mapping,
380                 bool trunc)                       394                 bool trunc)
381 {                                                 395 {
382         unsigned long pfn;                        396         unsigned long pfn;
383                                                   397 
384         if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))    398         if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
385                 return;                           399                 return;
386                                                   400 
387         for_each_mapped_pfn(entry, pfn) {         401         for_each_mapped_pfn(entry, pfn) {
388                 struct page *page = pfn_to_pag    402                 struct page *page = pfn_to_page(pfn);
389                                                   403 
390                 WARN_ON_ONCE(trunc && page_ref    404                 WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
391                 if (dax_page_is_shared(page))     405                 if (dax_page_is_shared(page)) {
392                         /* keep the shared fla    406                         /* keep the shared flag if this page is still shared */
393                         if (dax_page_share_put    407                         if (dax_page_share_put(page) > 0)
394                                 continue;         408                                 continue;
395                 } else                            409                 } else
396                         WARN_ON_ONCE(page->map    410                         WARN_ON_ONCE(page->mapping && page->mapping != mapping);
397                 page->mapping = NULL;             411                 page->mapping = NULL;
398                 page->index = 0;                  412                 page->index = 0;
399         }                                         413         }
400 }                                                 414 }
401                                                   415 
402 static struct page *dax_busy_page(void *entry)    416 static struct page *dax_busy_page(void *entry)
403 {                                                 417 {
404         unsigned long pfn;                        418         unsigned long pfn;
405                                                   419 
406         for_each_mapped_pfn(entry, pfn) {         420         for_each_mapped_pfn(entry, pfn) {
407                 struct page *page = pfn_to_pag    421                 struct page *page = pfn_to_page(pfn);
408                                                   422 
409                 if (page_ref_count(page) > 1)     423                 if (page_ref_count(page) > 1)
410                         return page;              424                         return page;
411         }                                         425         }
412         return NULL;                              426         return NULL;
413 }                                                 427 }
414                                                   428 
415 /**                                            !! 429 /*
 416  * dax_lock_folio - Lock the DAX entry corresponding to a folio !! 430  * dax_lock_page - Lock the DAX entry corresponding to a page
 417  * @folio: The folio whose entry we want to lock !! 431  * @page: The page whose entry we want to lock
418  *                                                432  *
419  * Context: Process context.                      433  * Context: Process context.
 420  * Return: A cookie to pass to dax_unlock_folio() or 0 if the entry could !! 434  * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
421  * not be locked.                                 435  * not be locked.
422  */                                               436  */
 423 dax_entry_t dax_lock_folio(struct folio *folio) !! 437 dax_entry_t dax_lock_page(struct page *page)
424 {                                                 438 {
425         XA_STATE(xas, NULL, 0);                   439         XA_STATE(xas, NULL, 0);
426         void *entry;                              440         void *entry;
427                                                   441 
 428         /* Ensure folio->mapping isn't freed while we look at it */ !! 442         /* Ensure page->mapping isn't freed while we look at it */
429         rcu_read_lock();                          443         rcu_read_lock();
430         for (;;) {                                444         for (;;) {
 431                 struct address_space *mapping = READ_ONCE(folio->mapping); !! 445                 struct address_space *mapping = READ_ONCE(page->mapping);
432                                                   446 
433                 entry = NULL;                     447                 entry = NULL;
434                 if (!mapping || !dax_mapping(m    448                 if (!mapping || !dax_mapping(mapping))
435                         break;                    449                         break;
436                                                   450 
437                 /*                                451                 /*
438                  * In the device-dax case ther    452                  * In the device-dax case there's no need to lock, a
439                  * struct dev_pagemap pin is s    453                  * struct dev_pagemap pin is sufficient to keep the
440                  * inode alive, and we assume     454                  * inode alive, and we assume we have dev_pagemap pin
441                  * otherwise we would not have    455                  * otherwise we would not have a valid pfn_to_page()
442                  * translation.                   456                  * translation.
443                  */                               457                  */
444                 entry = (void *)~0UL;             458                 entry = (void *)~0UL;
445                 if (S_ISCHR(mapping->host->i_m    459                 if (S_ISCHR(mapping->host->i_mode))
446                         break;                    460                         break;
447                                                   461 
448                 xas.xa = &mapping->i_pages;       462                 xas.xa = &mapping->i_pages;
449                 xas_lock_irq(&xas);               463                 xas_lock_irq(&xas);
 450                 if (mapping != folio->mapping) { !! 464                 if (mapping != page->mapping) {
451                         xas_unlock_irq(&xas);     465                         xas_unlock_irq(&xas);
452                         continue;                 466                         continue;
453                 }                                 467                 }
454                 xas_set(&xas, folio->index);   !! 468                 xas_set(&xas, page->index);
455                 entry = xas_load(&xas);           469                 entry = xas_load(&xas);
456                 if (dax_is_locked(entry)) {       470                 if (dax_is_locked(entry)) {
457                         rcu_read_unlock();        471                         rcu_read_unlock();
458                         wait_entry_unlocked(&x    472                         wait_entry_unlocked(&xas, entry);
459                         rcu_read_lock();          473                         rcu_read_lock();
460                         continue;                 474                         continue;
461                 }                                 475                 }
462                 dax_lock_entry(&xas, entry);      476                 dax_lock_entry(&xas, entry);
463                 xas_unlock_irq(&xas);             477                 xas_unlock_irq(&xas);
464                 break;                            478                 break;
465         }                                         479         }
466         rcu_read_unlock();                        480         rcu_read_unlock();
467         return (dax_entry_t)entry;                481         return (dax_entry_t)entry;
468 }                                                 482 }
469                                                   483 
 470 void dax_unlock_folio(struct folio *folio, dax_entry_t cookie) !! 484 void dax_unlock_page(struct page *page, dax_entry_t cookie)
471 {                                                 485 {
 472         struct address_space *mapping = folio->mapping; !! 486         struct address_space *mapping = page->mapping;
 473         XA_STATE(xas, &mapping->i_pages, folio->index); !! 487         XA_STATE(xas, &mapping->i_pages, page->index);
474                                                   488 
475         if (S_ISCHR(mapping->host->i_mode))       489         if (S_ISCHR(mapping->host->i_mode))
476                 return;                           490                 return;
477                                                   491 
478         dax_unlock_entry(&xas, (void *)cookie)    492         dax_unlock_entry(&xas, (void *)cookie);
479 }                                                 493 }
480                                                   494 
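/*
 * Illustrative usage of the pair above; the calling code is hypothetical,
 * only dax_lock_folio()/dax_unlock_folio() come from this file:
 *
 *      dax_entry_t cookie = dax_lock_folio(folio);
 *      if (!cookie)
 *              return -EBUSY;          (the entry could not be locked)
 *      ...operate while the DAX entry is held locked...
 *      dax_unlock_folio(folio, cookie);
 *
 * The older tree in the right-hand column exposes the same protocol as
 * dax_lock_page()/dax_unlock_page(), taking a struct page instead.
 */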
481 /*                                                495 /*
482  * dax_lock_mapping_entry - Lock the DAX entry    496  * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
483  * @mapping: the file's mapping whose entry we    497  * @mapping: the file's mapping whose entry we want to lock
484  * @index: the offset within this file            498  * @index: the offset within this file
485  * @page: output the dax page corresponding to    499  * @page: output the dax page corresponding to this dax entry
486  *                                                500  *
487  * Return: A cookie to pass to dax_unlock_mapp    501  * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
488  * could not be locked.                           502  * could not be locked.
489  */                                               503  */
490 dax_entry_t dax_lock_mapping_entry(struct addr    504 dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
491                 struct page **page)               505                 struct page **page)
492 {                                                 506 {
493         XA_STATE(xas, NULL, 0);                   507         XA_STATE(xas, NULL, 0);
494         void *entry;                              508         void *entry;
495                                                   509 
496         rcu_read_lock();                          510         rcu_read_lock();
497         for (;;) {                                511         for (;;) {
498                 entry = NULL;                     512                 entry = NULL;
499                 if (!dax_mapping(mapping))        513                 if (!dax_mapping(mapping))
500                         break;                    514                         break;
501                                                   515 
502                 xas.xa = &mapping->i_pages;       516                 xas.xa = &mapping->i_pages;
503                 xas_lock_irq(&xas);               517                 xas_lock_irq(&xas);
504                 xas_set(&xas, index);             518                 xas_set(&xas, index);
505                 entry = xas_load(&xas);           519                 entry = xas_load(&xas);
506                 if (dax_is_locked(entry)) {       520                 if (dax_is_locked(entry)) {
507                         rcu_read_unlock();        521                         rcu_read_unlock();
508                         wait_entry_unlocked(&x    522                         wait_entry_unlocked(&xas, entry);
509                         rcu_read_lock();          523                         rcu_read_lock();
510                         continue;                 524                         continue;
511                 }                                 525                 }
512                 if (!entry ||                     526                 if (!entry ||
513                     dax_is_zero_entry(entry) |    527                     dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
514                         /*                        528                         /*
515                          * Because we are look    529                          * Because we are looking for entry from file's mapping
516                          * and index, so the e    530                          * and index, so the entry may not be inserted for now,
517                          * or even a zero/empt    531                          * or even a zero/empty entry.  We don't think this is
518                          * an error case.  So,    532                          * an error case.  So, return a special value and do
519                          * not output @page.      533                          * not output @page.
520                          */                       534                          */
521                         entry = (void *)~0UL;     535                         entry = (void *)~0UL;
522                 } else {                          536                 } else {
523                         *page = pfn_to_page(da    537                         *page = pfn_to_page(dax_to_pfn(entry));
524                         dax_lock_entry(&xas, e    538                         dax_lock_entry(&xas, entry);
525                 }                                 539                 }
526                 xas_unlock_irq(&xas);             540                 xas_unlock_irq(&xas);
527                 break;                            541                 break;
528         }                                         542         }
529         rcu_read_unlock();                        543         rcu_read_unlock();
530         return (dax_entry_t)entry;                544         return (dax_entry_t)entry;
531 }                                                 545 }
532                                                   546 
533 void dax_unlock_mapping_entry(struct address_s    547 void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
534                 dax_entry_t cookie)               548                 dax_entry_t cookie)
535 {                                                 549 {
536         XA_STATE(xas, &mapping->i_pages, index    550         XA_STATE(xas, &mapping->i_pages, index);
537                                                   551 
538         if (cookie == ~0UL)                       552         if (cookie == ~0UL)
539                 return;                           553                 return;
540                                                   554 
541         dax_unlock_entry(&xas, (void *)cookie)    555         dax_unlock_entry(&xas, (void *)cookie);
542 }                                                 556 }
543                                                   557 
544 /*                                                558 /*
545  * Find page cache entry at given index. If it    559  * Find page cache entry at given index. If it is a DAX entry, return it
546  * with the entry locked. If the page cache do    560  * with the entry locked. If the page cache doesn't contain an entry at
547  * that index, add a locked empty entry.          561  * that index, add a locked empty entry.
548  *                                                562  *
549  * When requesting an entry with size DAX_PMD,    563  * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
550  * either return that locked entry or will ret    564  * either return that locked entry or will return VM_FAULT_FALLBACK.
551  * This will happen if there are any PTE entri    565  * This will happen if there are any PTE entries within the PMD range
552  * that we are requesting.                        566  * that we are requesting.
553  *                                                567  *
554  * We always favor PTE entries over PMD entrie    568  * We always favor PTE entries over PMD entries. There isn't a flow where we
555  * evict PTE entries in order to 'upgrade' the    569  * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
556  * insertion will fail if it finds any PTE ent    570  * insertion will fail if it finds any PTE entries already in the tree, and a
557  * PTE insertion will cause an existing PMD en    571  * PTE insertion will cause an existing PMD entry to be unmapped and
558  * downgraded to PTE entries.  This happens fo    572  * downgraded to PTE entries.  This happens for both PMD zero pages as
559  * well as PMD empty entries.                     573  * well as PMD empty entries.
560  *                                                574  *
561  * The exception to this downgrade path is for    575  * The exception to this downgrade path is for PMD entries that have
562  * real storage backing them.  We will leave t    576  * real storage backing them.  We will leave these real PMD entries in
563  * the tree, and PTE writes will simply dirty     577  * the tree, and PTE writes will simply dirty the entire PMD entry.
564  *                                                578  *
565  * Note: Unlike filemap_fault() we don't honor    579  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
566  * persistent memory the benefit is doubtful.     580  * persistent memory the benefit is doubtful. We can add that later if we can
567  * show it helps.                                 581  * show it helps.
568  *                                                582  *
569  * On error, this function does not return an     583  * On error, this function does not return an ERR_PTR.  Instead it returns
570  * a VM_FAULT code, encoded as an xarray inter    584  * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
571  * overlap with xarray value entries.             585  * overlap with xarray value entries.
572  */                                               586  */
573 static void *grab_mapping_entry(struct xa_stat    587 static void *grab_mapping_entry(struct xa_state *xas,
574                 struct address_space *mapping,    588                 struct address_space *mapping, unsigned int order)
575 {                                                 589 {
576         unsigned long index = xas->xa_index;      590         unsigned long index = xas->xa_index;
577         bool pmd_downgrade;     /* splitting P    591         bool pmd_downgrade;     /* splitting PMD entry into PTE entries? */
578         void *entry;                              592         void *entry;
579                                                   593 
580 retry:                                            594 retry:
581         pmd_downgrade = false;                    595         pmd_downgrade = false;
582         xas_lock_irq(xas);                        596         xas_lock_irq(xas);
583         entry = get_unlocked_entry(xas, order)    597         entry = get_unlocked_entry(xas, order);
584                                                   598 
585         if (entry) {                              599         if (entry) {
586                 if (dax_is_conflict(entry))       600                 if (dax_is_conflict(entry))
587                         goto fallback;            601                         goto fallback;
588                 if (!xa_is_value(entry)) {        602                 if (!xa_is_value(entry)) {
589                         xas_set_err(xas, -EIO)    603                         xas_set_err(xas, -EIO);
590                         goto out_unlock;          604                         goto out_unlock;
591                 }                                 605                 }
592                                                   606 
593                 if (order == 0) {                 607                 if (order == 0) {
594                         if (dax_is_pmd_entry(e    608                         if (dax_is_pmd_entry(entry) &&
595                             (dax_is_zero_entry    609                             (dax_is_zero_entry(entry) ||
596                              dax_is_empty_entr    610                              dax_is_empty_entry(entry))) {
597                                 pmd_downgrade     611                                 pmd_downgrade = true;
598                         }                         612                         }
599                 }                                 613                 }
600         }                                         614         }
601                                                   615 
602         if (pmd_downgrade) {                      616         if (pmd_downgrade) {
603                 /*                                617                 /*
604                  * Make sure 'entry' remains v    618                  * Make sure 'entry' remains valid while we drop
605                  * the i_pages lock.              619                  * the i_pages lock.
606                  */                               620                  */
607                 dax_lock_entry(xas, entry);       621                 dax_lock_entry(xas, entry);
608                                                   622 
609                 /*                                623                 /*
610                  * Besides huge zero pages the    624                  * Besides huge zero pages the only other thing that gets
611                  * downgraded are empty entrie    625                  * downgraded are empty entries which don't need to be
612                  * unmapped.                      626                  * unmapped.
613                  */                               627                  */
614                 if (dax_is_zero_entry(entry))     628                 if (dax_is_zero_entry(entry)) {
615                         xas_unlock_irq(xas);      629                         xas_unlock_irq(xas);
616                         unmap_mapping_pages(ma    630                         unmap_mapping_pages(mapping,
617                                         xas->x    631                                         xas->xa_index & ~PG_PMD_COLOUR,
618                                         PG_PMD    632                                         PG_PMD_NR, false);
619                         xas_reset(xas);           633                         xas_reset(xas);
620                         xas_lock_irq(xas);        634                         xas_lock_irq(xas);
621                 }                                 635                 }
622                                                   636 
623                 dax_disassociate_entry(entry,     637                 dax_disassociate_entry(entry, mapping, false);
624                 xas_store(xas, NULL);   /* und    638                 xas_store(xas, NULL);   /* undo the PMD join */
625                 dax_wake_entry(xas, entry, WAK    639                 dax_wake_entry(xas, entry, WAKE_ALL);
626                 mapping->nrpages -= PG_PMD_NR;    640                 mapping->nrpages -= PG_PMD_NR;
627                 entry = NULL;                     641                 entry = NULL;
628                 xas_set(xas, index);              642                 xas_set(xas, index);
629         }                                         643         }
630                                                   644 
631         if (entry) {                              645         if (entry) {
632                 dax_lock_entry(xas, entry);       646                 dax_lock_entry(xas, entry);
633         } else {                                  647         } else {
634                 unsigned long flags = DAX_EMPT    648                 unsigned long flags = DAX_EMPTY;
635                                                   649 
636                 if (order > 0)                    650                 if (order > 0)
637                         flags |= DAX_PMD;         651                         flags |= DAX_PMD;
638                 entry = dax_make_entry(pfn_to_    652                 entry = dax_make_entry(pfn_to_pfn_t(0), flags);
639                 dax_lock_entry(xas, entry);       653                 dax_lock_entry(xas, entry);
640                 if (xas_error(xas))               654                 if (xas_error(xas))
641                         goto out_unlock;          655                         goto out_unlock;
642                 mapping->nrpages += 1UL << ord    656                 mapping->nrpages += 1UL << order;
643         }                                         657         }
644                                                   658 
645 out_unlock:                                       659 out_unlock:
646         xas_unlock_irq(xas);                      660         xas_unlock_irq(xas);
647         if (xas_nomem(xas, mapping_gfp_mask(ma    661         if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
648                 goto retry;                       662                 goto retry;
649         if (xas->xa_node == XA_ERROR(-ENOMEM))    663         if (xas->xa_node == XA_ERROR(-ENOMEM))
650                 return xa_mk_internal(VM_FAULT    664                 return xa_mk_internal(VM_FAULT_OOM);
651         if (xas_error(xas))                       665         if (xas_error(xas))
652                 return xa_mk_internal(VM_FAULT    666                 return xa_mk_internal(VM_FAULT_SIGBUS);
653         return entry;                             667         return entry;
654 fallback:                                         668 fallback:
655         xas_unlock_irq(xas);                      669         xas_unlock_irq(xas);
656         return xa_mk_internal(VM_FAULT_FALLBAC    670         return xa_mk_internal(VM_FAULT_FALLBACK);
657 }                                                 671 }
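/*
 * A condensed sketch, not part of fs/dax.c, of the pattern the fault
 * handlers later in this file use to consume grab_mapping_entry()'s
 * return convention: an xarray internal entry encodes a VM_FAULT code,
 * anything else is the locked entry.  The example function name is
 * illustrative; the xarray helpers are real.
 */
static vm_fault_t example_use_grabbed_entry(struct xa_state *xas,
		struct address_space *mapping)
{
	void *entry = grab_mapping_entry(xas, mapping, 0);

	/* Encoded failure (VM_FAULT_OOM/SIGBUS/FALLBACK)?  Unpack and bail. */
	if (xa_is_internal(entry))
		return xa_to_internal(entry);

	/* ... service the fault, then drop the entry lock ... */
	dax_unlock_entry(xas, entry);
	return VM_FAULT_NOPAGE;
}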
658                                                   672 
659 /**                                               673 /**
660  * dax_layout_busy_page_range - find first pin    674  * dax_layout_busy_page_range - find first pinned page in @mapping
661  * @mapping: address space to scan for a page     675  * @mapping: address space to scan for a page with ref count > 1
662  * @start: Starting offset. Page containing 's    676  * @start: Starting offset. Page containing 'start' is included.
663  * @end: End offset. Page containing 'end' is     677  * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
664  *       pages from 'start' till the end of fi    678  *       pages from 'start' till the end of file are included.
665  *                                                679  *
666  * DAX requires ZONE_DEVICE mapped pages. Thes    680  * DAX requires ZONE_DEVICE mapped pages. These pages are never
667  * 'onlined' to the page allocator so they are    681  * 'onlined' to the page allocator so they are considered idle when
668  * page->count == 1. A filesystem uses this in    682  * page->count == 1. A filesystem uses this interface to determine if
669  * any page in the mapping is busy, i.e. for D    683  * any page in the mapping is busy, i.e. for DMA, or other
670  * get_user_pages() usages.                       684  * get_user_pages() usages.
671  *                                                685  *
672  * It is expected that the filesystem is holdi    686  * It is expected that the filesystem is holding locks to block the
673  * establishment of new mappings in this addre    687  * establishment of new mappings in this address_space. I.e. it expects
674  * to be able to run unmap_mapping_range() and    688  * to be able to run unmap_mapping_range() and subsequently not race
675  * mapping_mapped() becoming true.                689  * mapping_mapped() becoming true.
676  */                                               690  */
677 struct page *dax_layout_busy_page_range(struct    691 struct page *dax_layout_busy_page_range(struct address_space *mapping,
678                                         loff_t    692                                         loff_t start, loff_t end)
679 {                                                 693 {
680         void *entry;                              694         void *entry;
681         unsigned int scanned = 0;                 695         unsigned int scanned = 0;
682         struct page *page = NULL;                 696         struct page *page = NULL;
683         pgoff_t start_idx = start >> PAGE_SHIF    697         pgoff_t start_idx = start >> PAGE_SHIFT;
684         pgoff_t end_idx;                          698         pgoff_t end_idx;
685         XA_STATE(xas, &mapping->i_pages, start    699         XA_STATE(xas, &mapping->i_pages, start_idx);
686                                                   700 
687         /*                                        701         /*
688          * In the 'limited' case get_user_page    702          * In the 'limited' case get_user_pages() for dax is disabled.
689          */                                       703          */
690         if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))    704         if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
691                 return NULL;                      705                 return NULL;
692                                                   706 
693         if (!dax_mapping(mapping) || !mapping_    707         if (!dax_mapping(mapping) || !mapping_mapped(mapping))
694                 return NULL;                      708                 return NULL;
695                                                   709 
696         /* If end == LLONG_MAX, all pages from    710         /* If end == LLONG_MAX, all pages from start till the end of the file */
697         if (end == LLONG_MAX)                     711         if (end == LLONG_MAX)
698                 end_idx = ULONG_MAX;              712                 end_idx = ULONG_MAX;
699         else                                      713         else
700                 end_idx = end >> PAGE_SHIFT;      714                 end_idx = end >> PAGE_SHIFT;
701         /*                                        715         /*
702          * If we race get_user_pages_fast() he    716          * If we race get_user_pages_fast() here either we'll see the
703          * elevated page count in the iteratio    717          * elevated page count in the iteration and wait, or
704          * get_user_pages_fast() will see that    718          * get_user_pages_fast() will see that the page it took a reference
705          * against is no longer mapped in the     719          * against is no longer mapped in the page tables and bail to the
706          * get_user_pages() slow path.  The sl    720          * get_user_pages() slow path.  The slow path is protected by
707          * pte_lock() and pmd_lock(). New refe    721          * pte_lock() and pmd_lock(). New references are not taken without
708          * holding those locks, and unmap_mapp    722          * holding those locks, and unmap_mapping_pages() will not zero the
709          * pte or pmd without holding the resp    723          * pte or pmd without holding the respective lock, so we are
710          * guaranteed to either see new refere    724          * guaranteed to either see new references or prevent new
711          * references from being established.     725          * references from being established.
712          */                                       726          */
713         unmap_mapping_pages(mapping, start_idx    727         unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);
714                                                   728 
715         xas_lock_irq(&xas);                       729         xas_lock_irq(&xas);
716         xas_for_each(&xas, entry, end_idx) {      730         xas_for_each(&xas, entry, end_idx) {
717                 if (WARN_ON_ONCE(!xa_is_value(    731                 if (WARN_ON_ONCE(!xa_is_value(entry)))
718                         continue;                 732                         continue;
719                 if (unlikely(dax_is_locked(ent    733                 if (unlikely(dax_is_locked(entry)))
720                         entry = get_unlocked_e    734                         entry = get_unlocked_entry(&xas, 0);
721                 if (entry)                        735                 if (entry)
722                         page = dax_busy_page(e    736                         page = dax_busy_page(entry);
723                 put_unlocked_entry(&xas, entry    737                 put_unlocked_entry(&xas, entry, WAKE_NEXT);
724                 if (page)                         738                 if (page)
725                         break;                    739                         break;
726                 if (++scanned % XA_CHECK_SCHED    740                 if (++scanned % XA_CHECK_SCHED)
727                         continue;                 741                         continue;
728                                                   742 
729                 xas_pause(&xas);                  743                 xas_pause(&xas);
730                 xas_unlock_irq(&xas);             744                 xas_unlock_irq(&xas);
731                 cond_resched();                   745                 cond_resched();
732                 xas_lock_irq(&xas);               746                 xas_lock_irq(&xas);
733         }                                         747         }
734         xas_unlock_irq(&xas);                     748         xas_unlock_irq(&xas);
735         return page;                              749         return page;
736 }                                                 750 }
737 EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);    751 EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);
738                                                   752 
739 struct page *dax_layout_busy_page(struct addre    753 struct page *dax_layout_busy_page(struct address_space *mapping)
740 {                                                 754 {
741         return dax_layout_busy_page_range(mapp    755         return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
742 }                                                 756 }
743 EXPORT_SYMBOL_GPL(dax_layout_busy_page);          757 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
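/*
 * A minimal sketch, not part of fs/dax.c, of how a filesystem might use
 * dax_layout_busy_page() before truncating or punching a hole: with new
 * faults blocked by the filesystem's own mapping lock, any page that is
 * still pinned (e.g. by get_user_pages() for ongoing DMA) is reported
 * here so the caller can back off instead of freeing the blocks under
 * it.  Real filesystems (e.g. XFS) wait for the reference to drop and
 * retry; the helper name and the -EBUSY policy below are illustrative.
 */
static int example_break_dax_layouts(struct inode *inode)
{
	struct page *page;

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;	/* nothing pinned, safe to proceed */

	return -EBUSY;		/* a real caller would wait and retry */
}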
744                                                   758 
745 static int __dax_invalidate_entry(struct addre    759 static int __dax_invalidate_entry(struct address_space *mapping,
746                                           pgof    760                                           pgoff_t index, bool trunc)
747 {                                                 761 {
748         XA_STATE(xas, &mapping->i_pages, index    762         XA_STATE(xas, &mapping->i_pages, index);
749         int ret = 0;                              763         int ret = 0;
750         void *entry;                              764         void *entry;
751                                                   765 
752         xas_lock_irq(&xas);                       766         xas_lock_irq(&xas);
753         entry = get_unlocked_entry(&xas, 0);      767         entry = get_unlocked_entry(&xas, 0);
754         if (!entry || WARN_ON_ONCE(!xa_is_valu    768         if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
755                 goto out;                         769                 goto out;
756         if (!trunc &&                             770         if (!trunc &&
757             (xas_get_mark(&xas, PAGECACHE_TAG_    771             (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
758              xas_get_mark(&xas, PAGECACHE_TAG_    772              xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
759                 goto out;                         773                 goto out;
760         dax_disassociate_entry(entry, mapping,    774         dax_disassociate_entry(entry, mapping, trunc);
761         xas_store(&xas, NULL);                    775         xas_store(&xas, NULL);
762         mapping->nrpages -= 1UL << dax_entry_o    776         mapping->nrpages -= 1UL << dax_entry_order(entry);
763         ret = 1;                                  777         ret = 1;
764 out:                                              778 out:
765         put_unlocked_entry(&xas, entry, WAKE_A    779         put_unlocked_entry(&xas, entry, WAKE_ALL);
766         xas_unlock_irq(&xas);                     780         xas_unlock_irq(&xas);
767         return ret;                               781         return ret;
768 }                                                 782 }
769                                                   783 
770 static int __dax_clear_dirty_range(struct addr    784 static int __dax_clear_dirty_range(struct address_space *mapping,
771                 pgoff_t start, pgoff_t end)       785                 pgoff_t start, pgoff_t end)
772 {                                                 786 {
773         XA_STATE(xas, &mapping->i_pages, start    787         XA_STATE(xas, &mapping->i_pages, start);
774         unsigned int scanned = 0;                 788         unsigned int scanned = 0;
775         void *entry;                              789         void *entry;
776                                                   790 
777         xas_lock_irq(&xas);                       791         xas_lock_irq(&xas);
778         xas_for_each(&xas, entry, end) {          792         xas_for_each(&xas, entry, end) {
779                 entry = get_unlocked_entry(&xa    793                 entry = get_unlocked_entry(&xas, 0);
780                 xas_clear_mark(&xas, PAGECACHE    794                 xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
781                 xas_clear_mark(&xas, PAGECACHE    795                 xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
782                 put_unlocked_entry(&xas, entry    796                 put_unlocked_entry(&xas, entry, WAKE_NEXT);
783                                                   797 
784                 if (++scanned % XA_CHECK_SCHED    798                 if (++scanned % XA_CHECK_SCHED)
785                         continue;                 799                         continue;
786                                                   800 
787                 xas_pause(&xas);                  801                 xas_pause(&xas);
788                 xas_unlock_irq(&xas);             802                 xas_unlock_irq(&xas);
789                 cond_resched();                   803                 cond_resched();
790                 xas_lock_irq(&xas);               804                 xas_lock_irq(&xas);
791         }                                         805         }
792         xas_unlock_irq(&xas);                     806         xas_unlock_irq(&xas);
793                                                   807 
794         return 0;                                 808         return 0;
795 }                                                 809 }
796                                                   810 
797 /*                                                811 /*
798  * Delete DAX entry at @index from @mapping.      812  * Delete DAX entry at @index from @mapping.  Wait for it
799  * to be unlocked before deleting it.             813  * to be unlocked before deleting it.
800  */                                               814  */
801 int dax_delete_mapping_entry(struct address_sp    815 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
802 {                                                 816 {
803         int ret = __dax_invalidate_entry(mappi    817         int ret = __dax_invalidate_entry(mapping, index, true);
804                                                   818 
805         /*                                        819         /*
806          * This gets called from truncate / pu    820          * This gets called from truncate / punch_hole path. As such, the caller
807          * must hold locks protecting against     821          * must hold locks protecting against concurrent modifications of the
808          * page cache (usually fs-private i_mm    822          * page cache (usually fs-private i_mmap_sem for writing). Since the
809          * caller has seen a DAX entry for thi    823          * caller has seen a DAX entry for this index, we better find it
810          * at that index as well...               824          * at that index as well...
811          */                                       825          */
812         WARN_ON_ONCE(!ret);                       826         WARN_ON_ONCE(!ret);
813         return ret;                               827         return ret;
814 }                                                 828 }
815                                                   829 
816 /*                                                830 /*
817  * Invalidate DAX entry if it is clean.           831  * Invalidate DAX entry if it is clean.
818  */                                               832  */
819 int dax_invalidate_mapping_entry_sync(struct a    833 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
820                                       pgoff_t     834                                       pgoff_t index)
821 {                                                 835 {
822         return __dax_invalidate_entry(mapping,    836         return __dax_invalidate_entry(mapping, index, false);
823 }                                                 837 }
824                                                   838 
825 static pgoff_t dax_iomap_pgoff(const struct io    839 static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
826 {                                                 840 {
827         return PHYS_PFN(iomap->addr + (pos & P    841         return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
828 }                                                 842 }
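/*
 * Worked example for dax_iomap_pgoff(), assuming 4 KiB pages and values
 * chosen purely for illustration: an extent starting at file offset
 * iomap->offset = 0x100000 is backed by the dax device at
 * iomap->addr = 0x3000000.  For pos = 0x101234, pos & PAGE_MASK gives
 * 0x101000, so the result is PHYS_PFN(0x3000000 + 0x101000 - 0x100000)
 * = PHYS_PFN(0x3001000) = 0x3001, i.e. the device page backing pos.
 */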
829                                                   843 
830 static int copy_cow_page_dax(struct vm_fault *    844 static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
831 {                                                 845 {
832         pgoff_t pgoff = dax_iomap_pgoff(&iter-    846         pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
833         void *vto, *kaddr;                        847         void *vto, *kaddr;
834         long rc;                                  848         long rc;
835         int id;                                   849         int id;
836                                                   850 
837         id = dax_read_lock();                     851         id = dax_read_lock();
838         rc = dax_direct_access(iter->iomap.dax    852         rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
839                                 &kaddr, NULL);    853                                 &kaddr, NULL);
840         if (rc < 0) {                             854         if (rc < 0) {
841                 dax_read_unlock(id);              855                 dax_read_unlock(id);
842                 return rc;                        856                 return rc;
843         }                                         857         }
844         vto = kmap_atomic(vmf->cow_page);         858         vto = kmap_atomic(vmf->cow_page);
845         copy_user_page(vto, kaddr, vmf->addres    859         copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
846         kunmap_atomic(vto);                       860         kunmap_atomic(vto);
847         dax_read_unlock(id);                      861         dax_read_unlock(id);
848         return 0;                                 862         return 0;
849 }                                                 863 }
850                                                   864 
851 /*                                                865 /*
852  * MAP_SYNC on a dax mapping guarantees dirty     866  * MAP_SYNC on a dax mapping guarantees dirty metadata is
853  * flushed on write-faults (non-cow), but not     867  * flushed on write-faults (non-cow), but not read-faults.
854  */                                               868  */
855 static bool dax_fault_is_synchronous(const str    869 static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
856                 struct vm_area_struct *vma)       870                 struct vm_area_struct *vma)
857 {                                                 871 {
858         return (iter->flags & IOMAP_WRITE) &&     872         return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
859                 (iter->iomap.flags & IOMAP_F_D    873                 (iter->iomap.flags & IOMAP_F_DIRTY);
860 }                                                 874 }
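/*
 * A minimal userspace sketch (not kernel code) of the MAP_SYNC contract
 * referred to above: once such a mapping is established, a successful
 * write fault guarantees the metadata needed to reach that block is
 * durable, so the application can persist stores with CPU cache flushes
 * alone and never call fsync().  Error handling is omitted and the file
 * path is illustrative; the file must live on a DAX-capable mount.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>	/* MAP_SYNC may need <linux/mman.h> on older libcs */

int main(void)
{
	int fd = open("/mnt/pmem/data", O_RDWR);
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);

	p[0] = 'x';	/* write fault: metadata made durable before this completes */
	/* flush CPU caches to persist the data itself (e.g. pmem_persist()) */
	return 0;
}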
861                                                   875 
862 /*                                                876 /*
863  * By this point grab_mapping_entry() has ensu    877  * By this point grab_mapping_entry() has ensured that we have a locked entry
864  * of the appropriate size so we don't have to    878  * of the appropriate size so we don't have to worry about downgrading PMDs to
865  * PTEs.  If we happen to be trying to insert     879  * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
866  * already in the tree, we will skip the inser    880  * already in the tree, we will skip the insertion and just dirty the PMD as
867  * appropriate.                                   881  * appropriate.
868  */                                               882  */
869 static void *dax_insert_entry(struct xa_state     883 static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
870                 const struct iomap_iter *iter,    884                 const struct iomap_iter *iter, void *entry, pfn_t pfn,
871                 unsigned long flags)              885                 unsigned long flags)
872 {                                                 886 {
873         struct address_space *mapping = vmf->v    887         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
874         void *new_entry = dax_make_entry(pfn,     888         void *new_entry = dax_make_entry(pfn, flags);
875         bool write = iter->flags & IOMAP_WRITE    889         bool write = iter->flags & IOMAP_WRITE;
876         bool dirty = write && !dax_fault_is_sy    890         bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);
877         bool shared = iter->iomap.flags & IOMA    891         bool shared = iter->iomap.flags & IOMAP_F_SHARED;
878                                                   892 
879         if (dirty)                                893         if (dirty)
880                 __mark_inode_dirty(mapping->ho    894                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
881                                                   895 
882         if (shared || (dax_is_zero_entry(entry    896         if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
883                 unsigned long index = xas->xa_    897                 unsigned long index = xas->xa_index;
884                 /* we are replacing a zero pag    898                 /* we are replacing a zero page with block mapping */
885                 if (dax_is_pmd_entry(entry))      899                 if (dax_is_pmd_entry(entry))
886                         unmap_mapping_pages(ma    900                         unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
887                                         PG_PMD    901                                         PG_PMD_NR, false);
888                 else /* pte entry */              902                 else /* pte entry */
889                         unmap_mapping_pages(ma    903                         unmap_mapping_pages(mapping, index, 1, false);
890         }                                         904         }
891                                                   905 
892         xas_reset(xas);                           906         xas_reset(xas);
893         xas_lock_irq(xas);                        907         xas_lock_irq(xas);
894         if (shared || dax_is_zero_entry(entry)    908         if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
895                 void *old;                        909                 void *old;
896                                                   910 
897                 dax_disassociate_entry(entry,     911                 dax_disassociate_entry(entry, mapping, false);
898                 dax_associate_entry(new_entry,    912                 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
899                                 shared);          913                                 shared);
900                 /*                                914                 /*
901                  * Only swap our new entry int    915                  * Only swap our new entry into the page cache if the current
902                  * entry is a zero page or an     916                  * entry is a zero page or an empty entry.  If a normal PTE or
903                  * PMD entry is already in the    917                  * PMD entry is already in the cache, we leave it alone.  This
904                  * means that if we are trying    918                  * means that if we are trying to insert a PTE and the
905                  * existing entry is a PMD, we    919                  * existing entry is a PMD, we will just leave the PMD in the
906                  * tree and dirty it if necess    920                  * tree and dirty it if necessary.
907                  */                               921                  */
908                 old = dax_lock_entry(xas, new_    922                 old = dax_lock_entry(xas, new_entry);
909                 WARN_ON_ONCE(old != xa_mk_valu    923                 WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
910                                         DAX_LO    924                                         DAX_LOCKED));
911                 entry = new_entry;                925                 entry = new_entry;
912         } else {                                  926         } else {
913                 xas_load(xas);  /* Walk the xa    927                 xas_load(xas);  /* Walk the xa_state */
914         }                                         928         }
915                                                   929 
916         if (dirty)                                930         if (dirty)
917                 xas_set_mark(xas, PAGECACHE_TA    931                 xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
918                                                   932 
919         if (write && shared)                      933         if (write && shared)
920                 xas_set_mark(xas, PAGECACHE_TA    934                 xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);
921                                                   935 
922         xas_unlock_irq(xas);                      936         xas_unlock_irq(xas);
923         return entry;                             937         return entry;
924 }                                                 938 }
925                                                   939 
926 static int dax_writeback_one(struct xa_state *    940 static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
927                 struct address_space *mapping,    941                 struct address_space *mapping, void *entry)
928 {                                                 942 {
929         unsigned long pfn, index, count, end;     943         unsigned long pfn, index, count, end;
930         long ret = 0;                             944         long ret = 0;
931         struct vm_area_struct *vma;               945         struct vm_area_struct *vma;
932                                                   946 
933         /*                                        947         /*
934          * A page got tagged dirty in DAX mapp    948          * A page got tagged dirty in DAX mapping? Something is seriously
935          * wrong.                                 949          * wrong.
936          */                                       950          */
937         if (WARN_ON(!xa_is_value(entry)))         951         if (WARN_ON(!xa_is_value(entry)))
938                 return -EIO;                      952                 return -EIO;
939                                                   953 
940         if (unlikely(dax_is_locked(entry))) {     954         if (unlikely(dax_is_locked(entry))) {
941                 void *old_entry = entry;          955                 void *old_entry = entry;
942                                                   956 
943                 entry = get_unlocked_entry(xas    957                 entry = get_unlocked_entry(xas, 0);
944                                                   958 
945                 /* Entry got punched out / rea    959                 /* Entry got punched out / reallocated? */
946                 if (!entry || WARN_ON_ONCE(!xa    960                 if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
947                         goto put_unlocked;        961                         goto put_unlocked;
948                 /*                                962                 /*
949                  * Entry got reallocated elsew    963                  * Entry got reallocated elsewhere? No need to writeback.
950                  * We have to compare pfns as     964                  * We have to compare pfns as we must not bail out due to
951                  * difference in lockbit or en    965                  * difference in lockbit or entry type.
952                  */                               966                  */
953                 if (dax_to_pfn(old_entry) != d    967                 if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
954                         goto put_unlocked;        968                         goto put_unlocked;
955                 if (WARN_ON_ONCE(dax_is_empty_    969                 if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
956                                         dax_is    970                                         dax_is_zero_entry(entry))) {
957                         ret = -EIO;               971                         ret = -EIO;
958                         goto put_unlocked;        972                         goto put_unlocked;
959                 }                                 973                 }
960                                                   974 
961                 /* Another fsync thread may ha    975                 /* Another fsync thread may have already done this entry */
962                 if (!xas_get_mark(xas, PAGECAC    976                 if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
963                         goto put_unlocked;        977                         goto put_unlocked;
964         }                                         978         }
965                                                   979 
966         /* Lock the entry to serialize with pa    980         /* Lock the entry to serialize with page faults */
967         dax_lock_entry(xas, entry);               981         dax_lock_entry(xas, entry);
968                                                   982 
969         /*                                        983         /*
970          * We can clear the tag now but we hav    984          * We can clear the tag now but we have to be careful so that concurrent
971          * dax_writeback_one() calls for the s    985          * dax_writeback_one() calls for the same index cannot finish before we
972          * actually flush the caches. This is     986          * actually flush the caches. This is achieved as the calls will look
973          * at the entry only under the i_pages    987          * at the entry only under the i_pages lock and once they do that
974          * they will see the entry locked and     988          * they will see the entry locked and wait for it to unlock.
975          */                                       989          */
976         xas_clear_mark(xas, PAGECACHE_TAG_TOWR    990         xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
977         xas_unlock_irq(xas);                      991         xas_unlock_irq(xas);
978                                                   992 
979         /*                                        993         /*
980          * If dax_writeback_mapping_range() wa    994          * If dax_writeback_mapping_range() was given a wbc->range_start
981          * in the middle of a PMD, the 'index'    995          * in the middle of a PMD, the 'index' we use needs to be
982          * aligned to the start of the PMD.       996          * aligned to the start of the PMD.
983          * This allows us to flush for PMD_SIZ    997          * This allows us to flush for PMD_SIZE and not have to worry about
984          * partial PMD writebacks.                998          * partial PMD writebacks.
985          */                                       999          */
986         pfn = dax_to_pfn(entry);                  1000         pfn = dax_to_pfn(entry);
987         count = 1UL << dax_entry_order(entry);    1001         count = 1UL << dax_entry_order(entry);
988         index = xas->xa_index & ~(count - 1);     1002         index = xas->xa_index & ~(count - 1);
989         end = index + count - 1;                  1003         end = index + count - 1;
990                                                   1004 
991         /* Walk all mappings of a given index     1005         /* Walk all mappings of a given index of a file and writeprotect them */
992         i_mmap_lock_read(mapping);                1006         i_mmap_lock_read(mapping);
993         vma_interval_tree_foreach(vma, &mappin    1007         vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
994                 pfn_mkclean_range(pfn, count,     1008                 pfn_mkclean_range(pfn, count, index, vma);
995                 cond_resched();                   1009                 cond_resched();
996         }                                         1010         }
997         i_mmap_unlock_read(mapping);              1011         i_mmap_unlock_read(mapping);
998                                                   1012 
999         dax_flush(dax_dev, page_address(pfn_to    1013         dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
1000         /*                                       1014         /*
1001          * After we have flushed the cache, w    1015          * After we have flushed the cache, we can clear the dirty tag. There
1002          * cannot be new dirty data in the pf    1016          * cannot be new dirty data in the pfn after the flush has completed as
1003          * the pfn mappings are writeprotecte    1017          * the pfn mappings are writeprotected and fault waits for mapping
1004          * entry lock.                           1018          * entry lock.
1005          */                                      1019          */
1006         xas_reset(xas);                          1020         xas_reset(xas);
1007         xas_lock_irq(xas);                       1021         xas_lock_irq(xas);
1008         xas_store(xas, entry);                   1022         xas_store(xas, entry);
1009         xas_clear_mark(xas, PAGECACHE_TAG_DIR    1023         xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
1010         dax_wake_entry(xas, entry, WAKE_NEXT)    1024         dax_wake_entry(xas, entry, WAKE_NEXT);
1011                                                  1025 
1012         trace_dax_writeback_one(mapping->host    1026         trace_dax_writeback_one(mapping->host, index, count);
1013         return ret;                              1027         return ret;
1014                                                  1028 
1015  put_unlocked:                                   1029  put_unlocked:
1016         put_unlocked_entry(xas, entry, WAKE_N    1030         put_unlocked_entry(xas, entry, WAKE_NEXT);
1017         return ret;                              1031         return ret;
1018 }                                                1032 }
1019                                                  1033 
1020 /*                                               1034 /*
1021  * Flush the mapping to the persistent domain    1035  * Flush the mapping to the persistent domain within the byte range of [start,
1022  * end]. This is required by data integrity o    1036  * end]. This is required by data integrity operations to ensure file data is
1023  * on persistent storage prior to completion     1037  * on persistent storage prior to completion of the operation.
1024  */                                              1038  */
1025 int dax_writeback_mapping_range(struct addres    1039 int dax_writeback_mapping_range(struct address_space *mapping,
1026                 struct dax_device *dax_dev, s    1040                 struct dax_device *dax_dev, struct writeback_control *wbc)
1027 {                                                1041 {
1028         XA_STATE(xas, &mapping->i_pages, wbc-    1042         XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
1029         struct inode *inode = mapping->host;     1043         struct inode *inode = mapping->host;
1030         pgoff_t end_index = wbc->range_end >>    1044         pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
1031         void *entry;                             1045         void *entry;
1032         int ret = 0;                             1046         int ret = 0;
1033         unsigned int scanned = 0;                1047         unsigned int scanned = 0;
1034                                                  1048 
1035         if (WARN_ON_ONCE(inode->i_blkbits !=     1049         if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
1036                 return -EIO;                     1050                 return -EIO;
1037                                                  1051 
1038         if (mapping_empty(mapping) || wbc->sy    1052         if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
1039                 return 0;                        1053                 return 0;
1040                                                  1054 
1041         trace_dax_writeback_range(inode, xas.    1055         trace_dax_writeback_range(inode, xas.xa_index, end_index);
1042                                                  1056 
1043         tag_pages_for_writeback(mapping, xas.    1057         tag_pages_for_writeback(mapping, xas.xa_index, end_index);
1044                                                  1058 
1045         xas_lock_irq(&xas);                      1059         xas_lock_irq(&xas);
1046         xas_for_each_marked(&xas, entry, end_    1060         xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
1047                 ret = dax_writeback_one(&xas,    1061                 ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
1048                 if (ret < 0) {                   1062                 if (ret < 0) {
1049                         mapping_set_error(map    1063                         mapping_set_error(mapping, ret);
1050                         break;                   1064                         break;
1051                 }                                1065                 }
1052                 if (++scanned % XA_CHECK_SCHE    1066                 if (++scanned % XA_CHECK_SCHED)
1053                         continue;                1067                         continue;
1054                                                  1068 
1055                 xas_pause(&xas);                 1069                 xas_pause(&xas);
1056                 xas_unlock_irq(&xas);            1070                 xas_unlock_irq(&xas);
1057                 cond_resched();                  1071                 cond_resched();
1058                 xas_lock_irq(&xas);              1072                 xas_lock_irq(&xas);
1059         }                                        1073         }
1060         xas_unlock_irq(&xas);                    1074         xas_unlock_irq(&xas);
1061         trace_dax_writeback_range_done(inode,    1075         trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
1062         return ret;                              1076         return ret;
1063 }                                                1077 }
1064 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range    1078 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
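/*
 * A minimal sketch, not part of fs/dax.c, of how a filesystem typically
 * wires fsync()/msync() into the path above from its ->writepages
 * method.  "struct example_sb_info" and its s_daxdev member are
 * illustrative; real filesystems (ext2, ext4, XFS) keep the dax_device
 * they looked up at mount time in their superblock private data.
 */
struct example_sb_info {
	struct dax_device *s_daxdev;	/* obtained at mount time */
};

static int example_dax_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct example_sb_info *sbi = mapping->host->i_sb->s_fs_info;

	return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
}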
1065                                                  1079 
1066 static int dax_iomap_direct_access(const stru    1080 static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
1067                 size_t size, void **kaddr, pf    1081                 size_t size, void **kaddr, pfn_t *pfnp)
1068 {                                                1082 {
1069         pgoff_t pgoff = dax_iomap_pgoff(iomap    1083         pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1070         int id, rc = 0;                          1084         int id, rc = 0;
1071         long length;                             1085         long length;
1072                                                  1086 
1073         id = dax_read_lock();                    1087         id = dax_read_lock();
1074         length = dax_direct_access(iomap->dax    1088         length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
1075                                    DAX_ACCESS    1089                                    DAX_ACCESS, kaddr, pfnp);
1076         if (length < 0) {                        1090         if (length < 0) {
1077                 rc = length;                     1091                 rc = length;
1078                 goto out;                        1092                 goto out;
1079         }                                        1093         }
1080         if (!pfnp)                               1094         if (!pfnp)
1081                 goto out_check_addr;             1095                 goto out_check_addr;
1082         rc = -EINVAL;                            1096         rc = -EINVAL;
1083         if (PFN_PHYS(length) < size)             1097         if (PFN_PHYS(length) < size)
1084                 goto out;                        1098                 goto out;
1085         if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(s    1099         if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
1086                 goto out;                        1100                 goto out;
1087         /* For larger pages we need devmap */    1101         /* For larger pages we need devmap */
1088         if (length > 1 && !pfn_t_devmap(*pfnp    1102         if (length > 1 && !pfn_t_devmap(*pfnp))
1089                 goto out;                        1103                 goto out;
1090         rc = 0;                                  1104         rc = 0;
1091                                                  1105 
1092 out_check_addr:                                  1106 out_check_addr:
1093         if (!kaddr)                              1107         if (!kaddr)
1094                 goto out;                        1108                 goto out;
1095         if (!*kaddr)                             1109         if (!*kaddr)
1096                 rc = -EFAULT;                    1110                 rc = -EFAULT;
1097 out:                                             1111 out:
1098         dax_read_unlock(id);                     1112         dax_read_unlock(id);
1099         return rc;                               1113         return rc;
1100 }                                                1114 }
1101                                                  1115 
1102 /**                                              1116 /**
1103  * dax_iomap_copy_around - Prepare for an una    1117  * dax_iomap_copy_around - Prepare for an unaligned write to a shared/cow page
1104  * by copying the data before and after the r    1118  * by copying the data before and after the range to be written.
1105  * @pos:        address to do copy from.         1119  * @pos:        address to do copy from.
1106  * @length:     size of copy operation.          1120  * @length:     size of copy operation.
1107  * @align_size: aligned w.r.t align_size (eit    1121  * @align_size: aligned w.r.t align_size (either PMD_SIZE or PAGE_SIZE)
1108  * @srcmap:     iomap srcmap                     1122  * @srcmap:     iomap srcmap
1109  * @daddr:      destination address to copy t    1123  * @daddr:      destination address to copy to.
1110  *                                               1124  *
1111  * This can be called from two places. Either    1125  * This can be called from two places. Either during DAX write fault (page
1112  * aligned), to copy the length size data to     1126  * aligned), to copy the length size data to daddr. Or, while doing normal DAX
1113  * write operation, dax_iomap_iter() might ca    1127  * write operation, dax_iomap_iter() might call this to do the copy of either
1114  * start or end unaligned address. In the lat    1128  * start or end unaligned address. In the latter case the rest of the copy of
1115  * aligned ranges is taken care by dax_iomap_    1129  * aligned ranges is taken care by dax_iomap_iter() itself.
1116  * If the srcmap contains invalid data, such     1130  * If the srcmap contains invalid data, such as HOLE and UNWRITTEN, zero the
1117  * area to make sure no old data remains.        1131  * area to make sure no old data remains.
1118  */                                              1132  */
1119 static int dax_iomap_copy_around(loff_t pos,     1133 static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size,
1120                 const struct iomap *srcmap, v    1134                 const struct iomap *srcmap, void *daddr)
1121 {                                                1135 {
1122         loff_t head_off = pos & (align_size -    1136         loff_t head_off = pos & (align_size - 1);
1123         size_t size = ALIGN(head_off + length    1137         size_t size = ALIGN(head_off + length, align_size);
1124         loff_t end = pos + length;               1138         loff_t end = pos + length;
1125         loff_t pg_end = round_up(end, align_s    1139         loff_t pg_end = round_up(end, align_size);
1126         /* copy_all is usually in page fault     1140         /* copy_all is usually in page fault case */
1127         bool copy_all = head_off == 0 && end     1141         bool copy_all = head_off == 0 && end == pg_end;
1128         /* zero the edges if srcmap is a HOLE    1142         /* zero the edges if srcmap is a HOLE or IOMAP_UNWRITTEN */
1129         bool zero_edge = srcmap->flags & IOMA    1143         bool zero_edge = srcmap->flags & IOMAP_F_SHARED ||
1130                          srcmap->type == IOMA    1144                          srcmap->type == IOMAP_UNWRITTEN;
1131         void *saddr = NULL;                   !! 1145         void *saddr = 0;
1132         int ret = 0;                             1146         int ret = 0;
1133                                                  1147 
1134         if (!zero_edge) {                        1148         if (!zero_edge) {
1135                 ret = dax_iomap_direct_access    1149                 ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
1136                 if (ret)                         1150                 if (ret)
1137                         return dax_mem2blk_er !! 1151                         return ret;
1138         }                                        1152         }
1139                                                  1153 
1140         if (copy_all) {                          1154         if (copy_all) {
1141                 if (zero_edge)                   1155                 if (zero_edge)
1142                         memset(daddr, 0, size    1156                         memset(daddr, 0, size);
1143                 else                             1157                 else
1144                         ret = copy_mc_to_kern    1158                         ret = copy_mc_to_kernel(daddr, saddr, length);
1145                 goto out;                        1159                 goto out;
1146         }                                        1160         }
1147                                                  1161 
1148         /* Copy the head part of the range */    1162         /* Copy the head part of the range */
1149         if (head_off) {                          1163         if (head_off) {
1150                 if (zero_edge)                   1164                 if (zero_edge)
1151                         memset(daddr, 0, head    1165                         memset(daddr, 0, head_off);
1152                 else {                           1166                 else {
1153                         ret = copy_mc_to_kern    1167                         ret = copy_mc_to_kernel(daddr, saddr, head_off);
1154                         if (ret)                 1168                         if (ret)
1155                                 return -EIO;     1169                                 return -EIO;
1156                 }                                1170                 }
1157         }                                        1171         }
1158                                                  1172 
1159         /* Copy the tail part of the range */    1173         /* Copy the tail part of the range */
1160         if (end < pg_end) {                      1174         if (end < pg_end) {
1161                 loff_t tail_off = head_off +     1175                 loff_t tail_off = head_off + length;
1162                 loff_t tail_len = pg_end - en    1176                 loff_t tail_len = pg_end - end;
1163                                                  1177 
1164                 if (zero_edge)                   1178                 if (zero_edge)
1165                         memset(daddr + tail_o    1179                         memset(daddr + tail_off, 0, tail_len);
1166                 else {                           1180                 else {
1167                         ret = copy_mc_to_kern    1181                         ret = copy_mc_to_kernel(daddr + tail_off,
1168                                                  1182                                                 saddr + tail_off, tail_len);
1169                         if (ret)                 1183                         if (ret)
1170                                 return -EIO;     1184                                 return -EIO;
1171                 }                                1185                 }
1172         }                                        1186         }
1173 out:                                             1187 out:
1174         if (zero_edge)                           1188         if (zero_edge)
1175                 dax_flush(srcmap->dax_dev, da    1189                 dax_flush(srcmap->dax_dev, daddr, size);
1176         return ret ? -EIO : 0;                   1190         return ret ? -EIO : 0;
1177 }                                                1191 }
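/*
 * Worked example for dax_iomap_copy_around(), with illustrative numbers:
 * an unaligned 100-byte write at pos = 0xa0c8 with align_size = 4096.
 * head_off = 0xc8 (200), size = ALIGN(200 + 100, 4096) = 4096,
 * end = 0xa12c, pg_end = 0xb000, so copy_all is false.  The head copy
 * moves the first 200 bytes of the page from @srcmap (or zeroes them if
 * zero_edge), the tail copy covers offsets 300..4095 of the page, and
 * the 100 bytes in between are written later by the caller.
 */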
1178                                                  1192 
1179 /*                                               1193 /*
1180  * The user has performed a load from a hole     1194  * The user has performed a load from a hole in the file.  Allocating a new
1181  * page in the file would cause excessive sto    1195  * page in the file would cause excessive storage usage for workloads with
1182  * sparse files.  Instead we insert a read-on    1196  * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
1183  * If this page is ever written to we will re    1197  * If this page is ever written to we will re-fault and change the mapping to
1184  * point to real DAX storage instead.            1198  * point to real DAX storage instead.
1185  */                                              1199  */
1186 static vm_fault_t dax_load_hole(struct xa_sta    1200 static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1187                 const struct iomap_iter *iter    1201                 const struct iomap_iter *iter, void **entry)
1188 {                                                1202 {
1189         struct inode *inode = iter->inode;       1203         struct inode *inode = iter->inode;
1190         unsigned long vaddr = vmf->address;      1204         unsigned long vaddr = vmf->address;
1191         pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(    1205         pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
1192         vm_fault_t ret;                          1206         vm_fault_t ret;
1193                                                  1207 
1194         *entry = dax_insert_entry(xas, vmf, i    1208         *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
1195                                                  1209 
1196         ret = vmf_insert_mixed(vmf->vma, vadd    1210         ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1197         trace_dax_load_hole(inode, vmf, ret);    1211         trace_dax_load_hole(inode, vmf, ret);
1198         return ret;                              1212         return ret;
1199 }                                                1213 }
1200                                                  1214 
1201 #ifdef CONFIG_FS_DAX_PMD                         1215 #ifdef CONFIG_FS_DAX_PMD
1202 static vm_fault_t dax_pmd_load_hole(struct xa    1216 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1203                 const struct iomap_iter *iter    1217                 const struct iomap_iter *iter, void **entry)
1204 {                                                1218 {
1205         struct address_space *mapping = vmf->    1219         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1206         unsigned long pmd_addr = vmf->address    1220         unsigned long pmd_addr = vmf->address & PMD_MASK;
1207         struct vm_area_struct *vma = vmf->vma    1221         struct vm_area_struct *vma = vmf->vma;
1208         struct inode *inode = mapping->host;     1222         struct inode *inode = mapping->host;
1209         pgtable_t pgtable = NULL;                1223         pgtable_t pgtable = NULL;
1210         struct folio *zero_folio;             !! 1224         struct page *zero_page;
1211         spinlock_t *ptl;                         1225         spinlock_t *ptl;
1212         pmd_t pmd_entry;                         1226         pmd_t pmd_entry;
1213         pfn_t pfn;                               1227         pfn_t pfn;
1214                                                  1228 
1215         zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm);  !! 1229         zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1216                                                  1230 
1217         if (unlikely(!zero_folio))            !! 1231         if (unlikely(!zero_page))
1218                 goto fallback;                   1232                 goto fallback;
1219                                                  1233 
1220         pfn = page_to_pfn_t(&zero_folio->page);  !! 1234         pfn = page_to_pfn_t(zero_page);
1221         *entry = dax_insert_entry(xas, vmf, i    1235         *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
1222                                   DAX_PMD | D    1236                                   DAX_PMD | DAX_ZERO_PAGE);
1223                                                  1237 
1224         if (arch_needs_pgtable_deposit()) {      1238         if (arch_needs_pgtable_deposit()) {
1225                 pgtable = pte_alloc_one(vma->    1239                 pgtable = pte_alloc_one(vma->vm_mm);
1226                 if (!pgtable)                    1240                 if (!pgtable)
1227                         return VM_FAULT_OOM;     1241                         return VM_FAULT_OOM;
1228         }                                        1242         }
1229                                                  1243 
1230         ptl = pmd_lock(vmf->vma->vm_mm, vmf->    1244         ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1231         if (!pmd_none(*(vmf->pmd))) {            1245         if (!pmd_none(*(vmf->pmd))) {
1232                 spin_unlock(ptl);                1246                 spin_unlock(ptl);
1233                 goto fallback;                   1247                 goto fallback;
1234         }                                        1248         }
1235                                                  1249 
1236         if (pgtable) {                           1250         if (pgtable) {
1237                 pgtable_trans_huge_deposit(vm    1251                 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1238                 mm_inc_nr_ptes(vma->vm_mm);      1252                 mm_inc_nr_ptes(vma->vm_mm);
1239         }                                        1253         }
1240         pmd_entry = mk_pmd(&zero_folio->page, vmf->vma->vm_page_prot);  !! 1254         pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1241         pmd_entry = pmd_mkhuge(pmd_entry);       1255         pmd_entry = pmd_mkhuge(pmd_entry);
1242         set_pmd_at(vmf->vma->vm_mm, pmd_addr,    1256         set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1243         spin_unlock(ptl);                        1257         spin_unlock(ptl);
1244         trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry);  !! 1258         trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
1245         return VM_FAULT_NOPAGE;                  1259         return VM_FAULT_NOPAGE;
1246                                                  1260 
1247 fallback:                                        1261 fallback:
1248         if (pgtable)                             1262         if (pgtable)
1249                 pte_free(vma->vm_mm, pgtable)    1263                 pte_free(vma->vm_mm, pgtable);
1250         trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry);  !! 1264         trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
1251         return VM_FAULT_FALLBACK;                1265         return VM_FAULT_FALLBACK;
1252 }                                                1266 }
1253 #else                                            1267 #else
1254 static vm_fault_t dax_pmd_load_hole(struct xa    1268 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1255                 const struct iomap_iter *iter    1269                 const struct iomap_iter *iter, void **entry)
1256 {                                                1270 {
1257         return VM_FAULT_FALLBACK;                1271         return VM_FAULT_FALLBACK;
1258 }                                                1272 }
1259 #endif /* CONFIG_FS_DAX_PMD */                   1273 #endif /* CONFIG_FS_DAX_PMD */
1260                                                  1274 
1261 static s64 dax_unshare_iter(struct iomap_iter    1275 static s64 dax_unshare_iter(struct iomap_iter *iter)
1262 {                                                1276 {
1263         struct iomap *iomap = &iter->iomap;      1277         struct iomap *iomap = &iter->iomap;
1264         const struct iomap *srcmap = iomap_it    1278         const struct iomap *srcmap = iomap_iter_srcmap(iter);
1265         loff_t copy_pos = iter->pos;          !! 1279         loff_t pos = iter->pos;
1266         u64 copy_len = iomap_length(iter);    !! 1280         loff_t length = iomap_length(iter);
1267         u32 mod;                              << 
1268         int id = 0;                              1281         int id = 0;
1269         s64 ret = 0;                             1282         s64 ret = 0;
1270         void *daddr = NULL, *saddr = NULL;       1283         void *daddr = NULL, *saddr = NULL;
1271                                                  1284 
1272         if (!iomap_want_unshare_iter(iter))   !! 1285         /* don't bother with blocks that are not shared to start with */
1273                 return iomap_length(iter);    !! 1286         if (!(iomap->flags & IOMAP_F_SHARED))
1274                                               !! 1287                 return length;
1275         /*                                    << 
1276          * Extend the file range to be aligned to fsblock/pagesize, because  <<
1277          * we need to copy entire blocks, not just the byte range specified.  <<
1278          * Invalidate the mapping because we're about to CoW.  <<
1279          */                                   << 
1280         mod = offset_in_page(copy_pos);       << 
1281         if (mod) {                            << 
1282                 copy_len += mod;              << 
1283                 copy_pos -= mod;              << 
1284         }                                     << 
1285                                               << 
1286         mod = offset_in_page(copy_pos + copy_len);  <<
1287         if (mod)                              << 
1288                 copy_len += PAGE_SIZE - mod;  << 
1289                                               << 
1290         invalidate_inode_pages2_range(iter->inode->i_mapping,  <<
1291                                       copy_pos >> PAGE_SHIFT,  <<
1292                                       (copy_pos + copy_len - 1) >> PAGE_SHIFT);  <<
1293                                                  1288 
1294         id = dax_read_lock();                    1289         id = dax_read_lock();
1295         ret = dax_iomap_direct_access(iomap, copy_pos, copy_len, &daddr, NULL);  !! 1290         ret = dax_iomap_direct_access(iomap, pos, length, &daddr, NULL);
1296         if (ret < 0)                             1291         if (ret < 0)
1297                 goto out_unlock;                 1292                 goto out_unlock;
1298                                                  1293 
1299         ret = dax_iomap_direct_access(srcmap, copy_pos, copy_len, &saddr, NULL);  !! 1294         /* zero the distance if srcmap is HOLE or UNWRITTEN */
                                                   >> 1295         if (srcmap->flags & IOMAP_F_SHARED || srcmap->type == IOMAP_UNWRITTEN) {
                                                   >> 1296                 memset(daddr, 0, length);
                                                   >> 1297                 dax_flush(iomap->dax_dev, daddr, length);
                                                   >> 1298                 ret = length;
                                                   >> 1299                 goto out_unlock;
                                                   >> 1300         }
                                                   >> 1301 
                                                   >> 1302         ret = dax_iomap_direct_access(srcmap, pos, length, &saddr, NULL);
1300         if (ret < 0)                             1303         if (ret < 0)
1301                 goto out_unlock;                 1304                 goto out_unlock;
1302                                                  1305 
1303         if (copy_mc_to_kernel(daddr, saddr, copy_len) == 0)  !! 1306         if (copy_mc_to_kernel(daddr, saddr, length) == 0)
1304                 ret = iomap_length(iter);     !! 1307                 ret = length;
1305         else                                     1308         else
1306                 ret = -EIO;                      1309                 ret = -EIO;
1307                                                  1310 
1308 out_unlock:                                      1311 out_unlock:
1309         dax_read_unlock(id);                     1312         dax_read_unlock(id);
1310         return dax_mem2blk_err(ret);          !! 1313         return ret;
1311 }                                                1314 }
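
(editorial sketch, not part of fs/dax.c) A worked example of the block-alignment logic in dax_unshare_iter() above, assuming 4 KiB pages; the helper name is hypothetical and only restates the rounding applied to copy_pos/copy_len.

/* Hypothetical helper mirroring the page rounding in dax_unshare_iter(). */
static inline void example_round_to_page_bounds(loff_t *pos, u64 *len)
{
	u32 mod = offset_in_page(*pos);

	if (mod) {				/* round the start down */
		*len += mod;
		*pos -= mod;
	}
	mod = offset_in_page(*pos + *len);
	if (mod)				/* round the end up */
		*len += PAGE_SIZE - mod;

	/* e.g. pos = 5120, len = 2048 becomes pos = 4096, len = 4096 */
}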
1312                                                  1315 
1313 int dax_file_unshare(struct inode *inode, lof    1316 int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
1314                 const struct iomap_ops *ops)     1317                 const struct iomap_ops *ops)
1315 {                                                1318 {
1316         struct iomap_iter iter = {               1319         struct iomap_iter iter = {
1317                 .inode          = inode,         1320                 .inode          = inode,
1318                 .pos            = pos,           1321                 .pos            = pos,
                                                   >> 1322                 .len            = len,
1319                 .flags          = IOMAP_WRITE    1323                 .flags          = IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX,
1320         };                                       1324         };
1321         loff_t size = i_size_read(inode);     << 
1322         int ret;                                 1325         int ret;
1323                                                  1326 
1324         if (pos < 0 || pos >= size)           << 
1325                 return 0;                     << 
1326                                               << 
1327         iter.len = min(len, size - pos);      << 
1328         while ((ret = iomap_iter(&iter, ops))    1327         while ((ret = iomap_iter(&iter, ops)) > 0)
1329                 iter.processed = dax_unshare_    1328                 iter.processed = dax_unshare_iter(&iter);
1330         return ret;                              1329         return ret;
1331 }                                                1330 }
1332 EXPORT_SYMBOL_GPL(dax_file_unshare);             1331 EXPORT_SYMBOL_GPL(dax_file_unshare);
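
(editorial sketch, not part of fs/dax.c) A hedged example of how a CoW-capable filesystem might drive dax_file_unshare() from an unshare-range request; the function and ops names are placeholders, and real callers hold additional filesystem locks.

/* Hypothetical caller; example_write_iomap_ops is a placeholder. */
static int example_fs_unshare_range(struct inode *inode, loff_t offset,
				    loff_t len,
				    const struct iomap_ops *example_write_iomap_ops)
{
	int error;

	inode_lock(inode);	/* serialize against writers and truncate */
	error = dax_file_unshare(inode, offset, len, example_write_iomap_ops);
	inode_unlock(inode);
	return error;
}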
1333                                                  1332 
1334 static int dax_memzero(struct iomap_iter *ite    1333 static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
1335 {                                                1334 {
1336         const struct iomap *iomap = &iter->io    1335         const struct iomap *iomap = &iter->iomap;
1337         const struct iomap *srcmap = iomap_it    1336         const struct iomap *srcmap = iomap_iter_srcmap(iter);
1338         unsigned offset = offset_in_page(pos)    1337         unsigned offset = offset_in_page(pos);
1339         pgoff_t pgoff = dax_iomap_pgoff(iomap    1338         pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1340         void *kaddr;                             1339         void *kaddr;
1341         long ret;                                1340         long ret;
1342                                                  1341 
1343         ret = dax_direct_access(iomap->dax_de    1342         ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr,
1344                                 NULL);           1343                                 NULL);
1345         if (ret < 0)                             1344         if (ret < 0)
1346                 return dax_mem2blk_err(ret);  !! 1345                 return ret;
1347                                               << 
1348         memset(kaddr + offset, 0, size);         1346         memset(kaddr + offset, 0, size);
1349         if (iomap->flags & IOMAP_F_SHARED)       1347         if (iomap->flags & IOMAP_F_SHARED)
1350                 ret = dax_iomap_copy_around(p    1348                 ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap,
1351                                             k    1349                                             kaddr);
1352         else                                     1350         else
1353                 dax_flush(iomap->dax_dev, kad    1351                 dax_flush(iomap->dax_dev, kaddr + offset, size);
1354         return ret;                              1352         return ret;
1355 }                                                1353 }
1356                                                  1354 
1357 static s64 dax_zero_iter(struct iomap_iter *i    1355 static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
1358 {                                                1356 {
1359         const struct iomap *iomap = &iter->io    1357         const struct iomap *iomap = &iter->iomap;
1360         const struct iomap *srcmap = iomap_it    1358         const struct iomap *srcmap = iomap_iter_srcmap(iter);
1361         loff_t pos = iter->pos;                  1359         loff_t pos = iter->pos;
1362         u64 length = iomap_length(iter);         1360         u64 length = iomap_length(iter);
1363         s64 written = 0;                         1361         s64 written = 0;
1364                                                  1362 
1365         /* already zeroed?  we're done. */       1363         /* already zeroed?  we're done. */
1366         if (srcmap->type == IOMAP_HOLE || src    1364         if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
1367                 return length;                   1365                 return length;
1368                                                  1366 
1369         /*                                       1367         /*
1370          * invalidate the pages whose sharing    1368          * invalidate the pages whose sharing state is to be changed
1371          * because of CoW.                       1369          * because of CoW.
1372          */                                      1370          */
1373         if (iomap->flags & IOMAP_F_SHARED)       1371         if (iomap->flags & IOMAP_F_SHARED)
1374                 invalidate_inode_pages2_range    1372                 invalidate_inode_pages2_range(iter->inode->i_mapping,
1375                                                  1373                                               pos >> PAGE_SHIFT,
1376                                                  1374                                               (pos + length - 1) >> PAGE_SHIFT);
1377                                                  1375 
1378         do {                                     1376         do {
1379                 unsigned offset = offset_in_p    1377                 unsigned offset = offset_in_page(pos);
1380                 unsigned size = min_t(u64, PA    1378                 unsigned size = min_t(u64, PAGE_SIZE - offset, length);
1381                 pgoff_t pgoff = dax_iomap_pgo    1379                 pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1382                 long rc;                         1380                 long rc;
1383                 int id;                          1381                 int id;
1384                                                  1382 
1385                 id = dax_read_lock();            1383                 id = dax_read_lock();
1386                 if (IS_ALIGNED(pos, PAGE_SIZE    1384                 if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
1387                         rc = dax_zero_page_ra    1385                         rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
1388                 else                             1386                 else
1389                         rc = dax_memzero(iter    1387                         rc = dax_memzero(iter, pos, size);
1390                 dax_read_unlock(id);             1388                 dax_read_unlock(id);
1391                                                  1389 
1392                 if (rc < 0)                      1390                 if (rc < 0)
1393                         return rc;               1391                         return rc;
1394                 pos += size;                     1392                 pos += size;
1395                 length -= size;                  1393                 length -= size;
1396                 written += size;                 1394                 written += size;
1397         } while (length > 0);                    1395         } while (length > 0);
1398                                                  1396 
1399         if (did_zero)                            1397         if (did_zero)
1400                 *did_zero = true;                1398                 *did_zero = true;
1401         return written;                          1399         return written;
1402 }                                                1400 }
1403                                                  1401 
1404 int dax_zero_range(struct inode *inode, loff_    1402 int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1405                 const struct iomap_ops *ops)     1403                 const struct iomap_ops *ops)
1406 {                                                1404 {
1407         struct iomap_iter iter = {               1405         struct iomap_iter iter = {
1408                 .inode          = inode,         1406                 .inode          = inode,
1409                 .pos            = pos,           1407                 .pos            = pos,
1410                 .len            = len,           1408                 .len            = len,
1411                 .flags          = IOMAP_DAX |    1409                 .flags          = IOMAP_DAX | IOMAP_ZERO,
1412         };                                       1410         };
1413         int ret;                                 1411         int ret;
1414                                                  1412 
1415         while ((ret = iomap_iter(&iter, ops))    1413         while ((ret = iomap_iter(&iter, ops)) > 0)
1416                 iter.processed = dax_zero_ite    1414                 iter.processed = dax_zero_iter(&iter, did_zero);
1417         return ret;                              1415         return ret;
1418 }                                                1416 }
1419 EXPORT_SYMBOL_GPL(dax_zero_range);               1417 EXPORT_SYMBOL_GPL(dax_zero_range);
1420                                                  1418 
1421 int dax_truncate_page(struct inode *inode, lo    1419 int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1422                 const struct iomap_ops *ops)     1420                 const struct iomap_ops *ops)
1423 {                                                1421 {
1424         unsigned int blocksize = i_blocksize(    1422         unsigned int blocksize = i_blocksize(inode);
1425         unsigned int off = pos & (blocksize -    1423         unsigned int off = pos & (blocksize - 1);
1426                                                  1424 
1427         /* Block boundary? Nothing to do */      1425         /* Block boundary? Nothing to do */
1428         if (!off)                                1426         if (!off)
1429                 return 0;                        1427                 return 0;
1430         return dax_zero_range(inode, pos, blo    1428         return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
1431 }                                                1429 }
1432 EXPORT_SYMBOL_GPL(dax_truncate_page);            1430 EXPORT_SYMBOL_GPL(dax_truncate_page);
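
(editorial sketch, not part of fs/dax.c) A hedged example of the typical dax_truncate_page() caller: a truncate path zeroing the tail of the last block of a shrinking DAX file so stale data is not exposed if the file grows again; example_iomap_ops stands in for the filesystem's own ops.

/* Hypothetical truncate helper; example_iomap_ops is a placeholder. */
static int example_dax_setsize(struct inode *inode, loff_t newsize,
			       const struct iomap_ops *example_iomap_ops)
{
	bool did_zero = false;

	/* Nothing to zero when newsize is already block aligned. */
	return dax_truncate_page(inode, newsize, &did_zero, example_iomap_ops);
}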
1433                                                  1431 
1434 static loff_t dax_iomap_iter(const struct iom    1432 static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
1435                 struct iov_iter *iter)           1433                 struct iov_iter *iter)
1436 {                                                1434 {
1437         const struct iomap *iomap = &iomi->io    1435         const struct iomap *iomap = &iomi->iomap;
1438         const struct iomap *srcmap = iomap_it    1436         const struct iomap *srcmap = iomap_iter_srcmap(iomi);
1439         loff_t length = iomap_length(iomi);      1437         loff_t length = iomap_length(iomi);
1440         loff_t pos = iomi->pos;                  1438         loff_t pos = iomi->pos;
1441         struct dax_device *dax_dev = iomap->d    1439         struct dax_device *dax_dev = iomap->dax_dev;
1442         loff_t end = pos + length, done = 0;     1440         loff_t end = pos + length, done = 0;
1443         bool write = iov_iter_rw(iter) == WRI    1441         bool write = iov_iter_rw(iter) == WRITE;
1444         bool cow = write && iomap->flags & IO    1442         bool cow = write && iomap->flags & IOMAP_F_SHARED;
1445         ssize_t ret = 0;                         1443         ssize_t ret = 0;
1446         size_t xfer;                             1444         size_t xfer;
1447         int id;                                  1445         int id;
1448                                                  1446 
1449         if (!write) {                            1447         if (!write) {
1450                 end = min(end, i_size_read(io    1448                 end = min(end, i_size_read(iomi->inode));
1451                 if (pos >= end)                  1449                 if (pos >= end)
1452                         return 0;                1450                         return 0;
1453                                                  1451 
1454                 if (iomap->type == IOMAP_HOLE    1452                 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1455                         return iov_iter_zero(    1453                         return iov_iter_zero(min(length, end - pos), iter);
1456         }                                        1454         }
1457                                                  1455 
1458         /*                                       1456         /*
1459          * In DAX mode, enforce either pure o    1457          * In DAX mode, enforce either pure overwrites of written extents, or
1460          * writes to unwritten extents as par    1458          * writes to unwritten extents as part of a copy-on-write operation.
1461          */                                      1459          */
1462         if (WARN_ON_ONCE(iomap->type != IOMAP    1460         if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
1463                         !(iomap->flags & IOMA    1461                         !(iomap->flags & IOMAP_F_SHARED)))
1464                 return -EIO;                     1462                 return -EIO;
1465                                                  1463 
1466         /*                                       1464         /*
1467          * Write can allocate block for an ar    1465          * Write can allocate block for an area which has a hole page mapped
1468          * into page tables. We have to tear     1466          * into page tables. We have to tear down these mappings so that data
1469          * written by write(2) is visible in     1467          * written by write(2) is visible in mmap.
1470          */                                      1468          */
1471         if (iomap->flags & IOMAP_F_NEW || cow    1469         if (iomap->flags & IOMAP_F_NEW || cow) {
1472                 /*                               1470                 /*
1473                  * Filesystem allows CoW on n    1471                  * Filesystem allows CoW on non-shared extents. The src extents
1474                  * may have been mmapped with    1472                  * may have been mmapped with dirty mark before. To be able to
1475                  * invalidate its dax entries    1473                  * invalidate its dax entries, we need to clear the dirty mark
1476                  * in advance.                   1474                  * in advance.
1477                  */                              1475                  */
1478                 if (cow)                         1476                 if (cow)
1479                         __dax_clear_dirty_ran    1477                         __dax_clear_dirty_range(iomi->inode->i_mapping,
1480                                                  1478                                                 pos >> PAGE_SHIFT,
1481                                                  1479                                                 (end - 1) >> PAGE_SHIFT);
1482                 invalidate_inode_pages2_range    1480                 invalidate_inode_pages2_range(iomi->inode->i_mapping,
1483                                                  1481                                               pos >> PAGE_SHIFT,
1484                                                  1482                                               (end - 1) >> PAGE_SHIFT);
1485         }                                        1483         }
1486                                                  1484 
1487         id = dax_read_lock();                    1485         id = dax_read_lock();
1488         while (pos < end) {                      1486         while (pos < end) {
1489                 unsigned offset = pos & (PAGE    1487                 unsigned offset = pos & (PAGE_SIZE - 1);
1490                 const size_t size = ALIGN(len    1488                 const size_t size = ALIGN(length + offset, PAGE_SIZE);
1491                 pgoff_t pgoff = dax_iomap_pgo    1489                 pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1492                 ssize_t map_len;                 1490                 ssize_t map_len;
1493                 bool recovery = false;           1491                 bool recovery = false;
1494                 void *kaddr;                     1492                 void *kaddr;
1495                                                  1493 
1496                 if (fatal_signal_pending(curr    1494                 if (fatal_signal_pending(current)) {
1497                         ret = -EINTR;            1495                         ret = -EINTR;
1498                         break;                   1496                         break;
1499                 }                                1497                 }
1500                                                  1498 
1501                 map_len = dax_direct_access(d    1499                 map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1502                                 DAX_ACCESS, &    1500                                 DAX_ACCESS, &kaddr, NULL);
1503                 if (map_len == -EHWPOISON && iov_iter_rw(iter) == WRITE) {  !! 1501                 if (map_len == -EIO && iov_iter_rw(iter) == WRITE) {
1504                         map_len = dax_direct_    1502                         map_len = dax_direct_access(dax_dev, pgoff,
1505                                         PHYS_    1503                                         PHYS_PFN(size), DAX_RECOVERY_WRITE,
1506                                         &kadd    1504                                         &kaddr, NULL);
1507                         if (map_len > 0)         1505                         if (map_len > 0)
1508                                 recovery = tr    1506                                 recovery = true;
1509                 }                                1507                 }
1510                 if (map_len < 0) {               1508                 if (map_len < 0) {
1511                         ret = dax_mem2blk_err(map_len);  !! 1509                         ret = map_len;
1512                         break;                   1510                         break;
1513                 }                                1511                 }
1514                                                  1512 
1515                 if (cow) {                       1513                 if (cow) {
1516                         ret = dax_iomap_copy_    1514                         ret = dax_iomap_copy_around(pos, length, PAGE_SIZE,
1517                                                  1515                                                     srcmap, kaddr);
1518                         if (ret)                 1516                         if (ret)
1519                                 break;           1517                                 break;
1520                 }                                1518                 }
1521                                                  1519 
1522                 map_len = PFN_PHYS(map_len);     1520                 map_len = PFN_PHYS(map_len);
1523                 kaddr += offset;                 1521                 kaddr += offset;
1524                 map_len -= offset;               1522                 map_len -= offset;
1525                 if (map_len > end - pos)         1523                 if (map_len > end - pos)
1526                         map_len = end - pos;     1524                         map_len = end - pos;
1527                                                  1525 
1528                 if (recovery)                    1526                 if (recovery)
1529                         xfer = dax_recovery_w    1527                         xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
1530                                         map_l    1528                                         map_len, iter);
1531                 else if (write)                  1529                 else if (write)
1532                         xfer = dax_copy_from_    1530                         xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1533                                         map_l    1531                                         map_len, iter);
1534                 else                             1532                 else
1535                         xfer = dax_copy_to_it    1533                         xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1536                                         map_l    1534                                         map_len, iter);
1537                                                  1535 
1538                 pos += xfer;                     1536                 pos += xfer;
1539                 length -= xfer;                  1537                 length -= xfer;
1540                 done += xfer;                    1538                 done += xfer;
1541                                                  1539 
1542                 if (xfer == 0)                   1540                 if (xfer == 0)
1543                         ret = -EFAULT;           1541                         ret = -EFAULT;
1544                 if (xfer < map_len)              1542                 if (xfer < map_len)
1545                         break;                   1543                         break;
1546         }                                        1544         }
1547         dax_read_unlock(id);                     1545         dax_read_unlock(id);
1548                                                  1546 
1549         return done ? done : ret;                1547         return done ? done : ret;
1550 }                                                1548 }
1551                                                  1549 
1552 /**                                              1550 /**
1553  * dax_iomap_rw - Perform I/O to a DAX file      1551  * dax_iomap_rw - Perform I/O to a DAX file
1554  * @iocb:       The control block for this I/    1552  * @iocb:       The control block for this I/O
1555  * @iter:       The addresses to do I/O from     1553  * @iter:       The addresses to do I/O from or to
1556  * @ops:        iomap ops passed from the fil    1554  * @ops:        iomap ops passed from the file system
1557  *                                               1555  *
1558  * This function performs read and write oper    1556  * This function performs read and write operations to directly mapped
1559  * persistent memory.  The callers needs to t    1557  * persistent memory.  The callers needs to take care of read/write exclusion
1560  * and evicting any page cache pages in the r    1558  * and evicting any page cache pages in the region under I/O.
1561  */                                              1559  */
1562 ssize_t                                          1560 ssize_t
1563 dax_iomap_rw(struct kiocb *iocb, struct iov_i    1561 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1564                 const struct iomap_ops *ops)     1562                 const struct iomap_ops *ops)
1565 {                                                1563 {
1566         struct iomap_iter iomi = {               1564         struct iomap_iter iomi = {
1567                 .inode          = iocb->ki_fi    1565                 .inode          = iocb->ki_filp->f_mapping->host,
1568                 .pos            = iocb->ki_po    1566                 .pos            = iocb->ki_pos,
1569                 .len            = iov_iter_co    1567                 .len            = iov_iter_count(iter),
1570                 .flags          = IOMAP_DAX,     1568                 .flags          = IOMAP_DAX,
1571         };                                       1569         };
1572         loff_t done = 0;                         1570         loff_t done = 0;
1573         int ret;                                 1571         int ret;
1574                                                  1572 
1575         if (!iomi.len)                           1573         if (!iomi.len)
1576                 return 0;                        1574                 return 0;
1577                                                  1575 
1578         if (iov_iter_rw(iter) == WRITE) {        1576         if (iov_iter_rw(iter) == WRITE) {
1579                 lockdep_assert_held_write(&io    1577                 lockdep_assert_held_write(&iomi.inode->i_rwsem);
1580                 iomi.flags |= IOMAP_WRITE;       1578                 iomi.flags |= IOMAP_WRITE;
1581         } else {                                 1579         } else {
1582                 lockdep_assert_held(&iomi.ino    1580                 lockdep_assert_held(&iomi.inode->i_rwsem);
1583         }                                        1581         }
1584                                                  1582 
1585         if (iocb->ki_flags & IOCB_NOWAIT)        1583         if (iocb->ki_flags & IOCB_NOWAIT)
1586                 iomi.flags |= IOMAP_NOWAIT;      1584                 iomi.flags |= IOMAP_NOWAIT;
1587                                                  1585 
1588         while ((ret = iomap_iter(&iomi, ops))    1586         while ((ret = iomap_iter(&iomi, ops)) > 0)
1589                 iomi.processed = dax_iomap_it    1587                 iomi.processed = dax_iomap_iter(&iomi, iter);
1590                                                  1588 
1591         done = iomi.pos - iocb->ki_pos;          1589         done = iomi.pos - iocb->ki_pos;
1592         iocb->ki_pos = iomi.pos;                 1590         iocb->ki_pos = iomi.pos;
1593         return done ? done : ret;                1591         return done ? done : ret;
1594 }                                                1592 }
1595 EXPORT_SYMBOL_GPL(dax_iomap_rw);                 1593 EXPORT_SYMBOL_GPL(dax_iomap_rw);
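
(editorial sketch, not part of fs/dax.c) A minimal example of a filesystem ->read_iter path that satisfies the shared-i_rwsem assertion in dax_iomap_rw() above; example_read_iomap_ops is a placeholder for the fs-provided iomap_ops.

/* Hypothetical read path; example_read_iomap_ops is a placeholder. */
static ssize_t example_dax_read_iter(struct kiocb *iocb, struct iov_iter *to,
				     const struct iomap_ops *example_read_iomap_ops)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iov_iter_count(to))
		return 0;

	inode_lock_shared(inode);	/* dax_iomap_rw() asserts this is held */
	ret = dax_iomap_rw(iocb, to, example_read_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}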
1596                                                  1594 
1597 static vm_fault_t dax_fault_return(int error)    1595 static vm_fault_t dax_fault_return(int error)
1598 {                                                1596 {
1599         if (error == 0)                          1597         if (error == 0)
1600                 return VM_FAULT_NOPAGE;          1598                 return VM_FAULT_NOPAGE;
1601         return vmf_error(error);                 1599         return vmf_error(error);
1602 }                                                1600 }
1603                                                  1601 
1604 /*                                               1602 /*
1605  * When handling a synchronous page fault and    1603  * When handling a synchronous page fault and the inode need a fsync, we can
1606  * insert the PTE/PMD into page tables only a    1604  * insert the PTE/PMD into page tables only after that fsync happened. Skip
1607  * insertion for now and return the pfn so th    1605  * insertion for now and return the pfn so that caller can insert it after the
1608  * fsync is done.                                1606  * fsync is done.
1609  */                                              1607  */
1610 static vm_fault_t dax_fault_synchronous_pfnp(    1608 static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
1611 {                                                1609 {
1612         if (WARN_ON_ONCE(!pfnp))                 1610         if (WARN_ON_ONCE(!pfnp))
1613                 return VM_FAULT_SIGBUS;          1611                 return VM_FAULT_SIGBUS;
1614         *pfnp = pfn;                             1612         *pfnp = pfn;
1615         return VM_FAULT_NEEDDSYNC;               1613         return VM_FAULT_NEEDDSYNC;
1616 }                                                1614 }
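
(editorial sketch, not part of fs/dax.c) A hedged example of how a fault handler consumes VM_FAULT_NEEDDSYNC: take the pfn returned above, make the new allocation durable (the fsync described in the comment), then insert it with dax_finish_sync_fault(). It assumes recent kernels where the fault order is passed as an unsigned int; example_iomap_ops is a placeholder.

/* Hypothetical fault wrapper; example_iomap_ops is a placeholder. */
static vm_fault_t example_dax_huge_fault(struct vm_fault *vmf, unsigned int order,
					 const struct iomap_ops *example_iomap_ops)
{
	pfn_t pfn;
	vm_fault_t ret;

	ret = dax_iomap_fault(vmf, order, &pfn, NULL, example_iomap_ops);
	if (ret & VM_FAULT_NEEDDSYNC) {
		/*
		 * The filesystem is expected to commit metadata for this range
		 * to stable storage here before mapping the pfn writably.
		 */
		ret = dax_finish_sync_fault(vmf, order, pfn);
	}
	return ret;
}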
1617                                                  1615 
1618 static vm_fault_t dax_fault_cow_page(struct v    1616 static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
1619                 const struct iomap_iter *iter    1617                 const struct iomap_iter *iter)
1620 {                                                1618 {
1621         vm_fault_t ret;                          1619         vm_fault_t ret;
1622         int error = 0;                           1620         int error = 0;
1623                                                  1621 
1624         switch (iter->iomap.type) {              1622         switch (iter->iomap.type) {
1625         case IOMAP_HOLE:                         1623         case IOMAP_HOLE:
1626         case IOMAP_UNWRITTEN:                    1624         case IOMAP_UNWRITTEN:
1627                 clear_user_highpage(vmf->cow_    1625                 clear_user_highpage(vmf->cow_page, vmf->address);
1628                 break;                           1626                 break;
1629         case IOMAP_MAPPED:                       1627         case IOMAP_MAPPED:
1630                 error = copy_cow_page_dax(vmf    1628                 error = copy_cow_page_dax(vmf, iter);
1631                 break;                           1629                 break;
1632         default:                                 1630         default:
1633                 WARN_ON_ONCE(1);                 1631                 WARN_ON_ONCE(1);
1634                 error = -EIO;                    1632                 error = -EIO;
1635                 break;                           1633                 break;
1636         }                                        1634         }
1637                                                  1635 
1638         if (error)                               1636         if (error)
1639                 return dax_fault_return(error    1637                 return dax_fault_return(error);
1640                                                  1638 
1641         __SetPageUptodate(vmf->cow_page);        1639         __SetPageUptodate(vmf->cow_page);
1642         ret = finish_fault(vmf);                 1640         ret = finish_fault(vmf);
1643         if (!ret)                                1641         if (!ret)
1644                 return VM_FAULT_DONE_COW;        1642                 return VM_FAULT_DONE_COW;
1645         return ret;                              1643         return ret;
1646 }                                                1644 }
1647                                                  1645 
1648 /**                                              1646 /**
1649  * dax_fault_iter - Common actor to handle pf    1647  * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
1650  * @vmf:        vm fault instance                1648  * @vmf:        vm fault instance
1651  * @iter:       iomap iter                       1649  * @iter:       iomap iter
1652  * @pfnp:       pfn to be returned               1650  * @pfnp:       pfn to be returned
1653  * @xas:        the dax mapping tree of a fil    1651  * @xas:        the dax mapping tree of a file
1654  * @entry:      an unlocked dax entry to be i    1652  * @entry:      an unlocked dax entry to be inserted
1655  * @pmd:        distinguish whether it is a p    1653  * @pmd:        distinguish whether it is a pmd fault
1656  */                                              1654  */
1657 static vm_fault_t dax_fault_iter(struct vm_fa    1655 static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
1658                 const struct iomap_iter *iter    1656                 const struct iomap_iter *iter, pfn_t *pfnp,
1659                 struct xa_state *xas, void **    1657                 struct xa_state *xas, void **entry, bool pmd)
1660 {                                                1658 {
1661         const struct iomap *iomap = &iter->io    1659         const struct iomap *iomap = &iter->iomap;
1662         const struct iomap *srcmap = iomap_it    1660         const struct iomap *srcmap = iomap_iter_srcmap(iter);
1663         size_t size = pmd ? PMD_SIZE : PAGE_S    1661         size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
1664         loff_t pos = (loff_t)xas->xa_index <<    1662         loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
1665         bool write = iter->flags & IOMAP_WRIT    1663         bool write = iter->flags & IOMAP_WRITE;
1666         unsigned long entry_flags = pmd ? DAX    1664         unsigned long entry_flags = pmd ? DAX_PMD : 0;
1667         int err = 0;                             1665         int err = 0;
1668         pfn_t pfn;                               1666         pfn_t pfn;
1669         void *kaddr;                             1667         void *kaddr;
1670                                                  1668 
1671         if (!pmd && vmf->cow_page)               1669         if (!pmd && vmf->cow_page)
1672                 return dax_fault_cow_page(vmf    1670                 return dax_fault_cow_page(vmf, iter);
1673                                                  1671 
1674         /* if we are reading UNWRITTEN and HO    1672         /* if we are reading UNWRITTEN and HOLE, return a hole. */
1675         if (!write &&                            1673         if (!write &&
1676             (iomap->type == IOMAP_UNWRITTEN |    1674             (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
1677                 if (!pmd)                        1675                 if (!pmd)
1678                         return dax_load_hole(    1676                         return dax_load_hole(xas, vmf, iter, entry);
1679                 return dax_pmd_load_hole(xas,    1677                 return dax_pmd_load_hole(xas, vmf, iter, entry);
1680         }                                        1678         }
1681                                                  1679 
1682         if (iomap->type != IOMAP_MAPPED && !(    1680         if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) {
1683                 WARN_ON_ONCE(1);                 1681                 WARN_ON_ONCE(1);
1684                 return pmd ? VM_FAULT_FALLBAC    1682                 return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
1685         }                                        1683         }
1686                                                  1684 
1687         err = dax_iomap_direct_access(iomap,     1685         err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
1688         if (err)                                 1686         if (err)
1689                 return pmd ? VM_FAULT_FALLBAC    1687                 return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
1690                                                  1688 
1691         *entry = dax_insert_entry(xas, vmf, i    1689         *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);
1692                                                  1690 
1693         if (write && iomap->flags & IOMAP_F_S    1691         if (write && iomap->flags & IOMAP_F_SHARED) {
1694                 err = dax_iomap_copy_around(p    1692                 err = dax_iomap_copy_around(pos, size, size, srcmap, kaddr);
1695                 if (err)                         1693                 if (err)
1696                         return dax_fault_retu    1694                         return dax_fault_return(err);
1697         }                                        1695         }
1698                                                  1696 
1699         if (dax_fault_is_synchronous(iter, vm    1697         if (dax_fault_is_synchronous(iter, vmf->vma))
1700                 return dax_fault_synchronous_    1698                 return dax_fault_synchronous_pfnp(pfnp, pfn);
1701                                                  1699 
1702         /* insert PMD pfn */                     1700         /* insert PMD pfn */
1703         if (pmd)                                 1701         if (pmd)
1704                 return vmf_insert_pfn_pmd(vmf    1702                 return vmf_insert_pfn_pmd(vmf, pfn, write);
1705                                                  1703 
1706         /* insert PTE pfn */                     1704         /* insert PTE pfn */
1707         if (write)                               1705         if (write)
1708                 return vmf_insert_mixed_mkwri    1706                 return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1709         return vmf_insert_mixed(vmf->vma, vmf    1707         return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
1710 }                                                1708 }
1711                                                  1709 
1712 static vm_fault_t dax_iomap_pte_fault(struct     1710 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1713                                int *iomap_err    1711                                int *iomap_errp, const struct iomap_ops *ops)
1714 {                                                1712 {
1715         struct address_space *mapping = vmf->    1713         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1716         XA_STATE(xas, &mapping->i_pages, vmf-    1714         XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
1717         struct iomap_iter iter = {               1715         struct iomap_iter iter = {
1718                 .inode          = mapping->ho    1716                 .inode          = mapping->host,
1719                 .pos            = (loff_t)vmf    1717                 .pos            = (loff_t)vmf->pgoff << PAGE_SHIFT,
1720                 .len            = PAGE_SIZE,     1718                 .len            = PAGE_SIZE,
1721                 .flags          = IOMAP_DAX |    1719                 .flags          = IOMAP_DAX | IOMAP_FAULT,
1722         };                                       1720         };
1723         vm_fault_t ret = 0;                      1721         vm_fault_t ret = 0;
1724         void *entry;                             1722         void *entry;
1725         int error;                               1723         int error;
1726                                                  1724 
1727         trace_dax_pte_fault(iter.inode, vmf,     1725         trace_dax_pte_fault(iter.inode, vmf, ret);
1728         /*                                       1726         /*
1729          * Check whether offset isn't beyond     1727          * Check whether offset isn't beyond end of file now. Caller is supposed
1730          * to hold locks serializing us with     1728          * to hold locks serializing us with truncate / punch hole so this is
1731          * a reliable test.                      1729          * a reliable test.
1732          */                                      1730          */
1733         if (iter.pos >= i_size_read(iter.inod    1731         if (iter.pos >= i_size_read(iter.inode)) {
1734                 ret = VM_FAULT_SIGBUS;           1732                 ret = VM_FAULT_SIGBUS;
1735                 goto out;                        1733                 goto out;
1736         }                                        1734         }
1737                                                  1735 
1738         if ((vmf->flags & FAULT_FLAG_WRITE) &    1736         if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1739                 iter.flags |= IOMAP_WRITE;       1737                 iter.flags |= IOMAP_WRITE;
1740                                                  1738 
1741         entry = grab_mapping_entry(&xas, mapp    1739         entry = grab_mapping_entry(&xas, mapping, 0);
1742         if (xa_is_internal(entry)) {             1740         if (xa_is_internal(entry)) {
1743                 ret = xa_to_internal(entry);     1741                 ret = xa_to_internal(entry);
1744                 goto out;                        1742                 goto out;
1745         }                                        1743         }
1746                                                  1744 
1747         /*                                       1745         /*
1748          * It is possible, particularly with     1746          * It is possible, particularly with mixed reads & writes to private
1749          * mappings, that we have raced with     1747          * mappings, that we have raced with a PMD fault that overlaps with
1750          * the PTE we need to set up.  If so     1748          * the PTE we need to set up.  If so just return and the fault will be
1751          * retried.                              1749          * retried.
1752          */                                      1750          */
1753         if (pmd_trans_huge(*vmf->pmd) || pmd_    1751         if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1754                 ret = VM_FAULT_NOPAGE;           1752                 ret = VM_FAULT_NOPAGE;
1755                 goto unlock_entry;               1753                 goto unlock_entry;
1756         }                                        1754         }
1757                                                  1755 
1758         while ((error = iomap_iter(&iter, ops    1756         while ((error = iomap_iter(&iter, ops)) > 0) {
1759                 if (WARN_ON_ONCE(iomap_length    1757                 if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
1760                         iter.processed = -EIO    1758                         iter.processed = -EIO;  /* fs corruption? */
1761                         continue;                1759                         continue;
1762                 }                                1760                 }
1763                                                  1761 
1764                 ret = dax_fault_iter(vmf, &it    1762                 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
1765                 if (ret != VM_FAULT_SIGBUS &&    1763                 if (ret != VM_FAULT_SIGBUS &&
1766                     (iter.iomap.flags & IOMAP    1764                     (iter.iomap.flags & IOMAP_F_NEW)) {
1767                         count_vm_event(PGMAJF    1765                         count_vm_event(PGMAJFAULT);
1768                         count_memcg_event_mm(    1766                         count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
1769                         ret |= VM_FAULT_MAJOR    1767                         ret |= VM_FAULT_MAJOR;
1770                 }                                1768                 }
1771                                                  1769 
1772                 if (!(ret & VM_FAULT_ERROR))     1770                 if (!(ret & VM_FAULT_ERROR))
1773                         iter.processed = PAGE    1771                         iter.processed = PAGE_SIZE;
1774         }                                        1772         }
1775                                                  1773 
1776         if (iomap_errp)                          1774         if (iomap_errp)
1777                 *iomap_errp = error;             1775                 *iomap_errp = error;
1778         if (!ret && error)                       1776         if (!ret && error)
1779                 ret = dax_fault_return(error)    1777                 ret = dax_fault_return(error);
1780                                                  1778 
1781 unlock_entry:                                    1779 unlock_entry:
1782         dax_unlock_entry(&xas, entry);           1780         dax_unlock_entry(&xas, entry);
1783 out:                                             1781 out:
1784         trace_dax_pte_fault_done(iter.inode,     1782         trace_dax_pte_fault_done(iter.inode, vmf, ret);
1785         return ret;                              1783         return ret;
1786 }                                                1784 }
1787                                                  1785 
1788 #ifdef CONFIG_FS_DAX_PMD                         1786 #ifdef CONFIG_FS_DAX_PMD
1789 static bool dax_fault_check_fallback(struct v    1787 static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
1790                 pgoff_t max_pgoff)               1788                 pgoff_t max_pgoff)
1791 {                                                1789 {
1792         unsigned long pmd_addr = vmf->address    1790         unsigned long pmd_addr = vmf->address & PMD_MASK;
1793         bool write = vmf->flags & FAULT_FLAG_    1791         bool write = vmf->flags & FAULT_FLAG_WRITE;
1794                                                  1792 
1795         /*                                       1793         /*
1796          * Make sure that the faulting addres    1794          * Make sure that the faulting address's PMD offset (color) matches
1797          * the PMD offset from the start of t    1795          * the PMD offset from the start of the file.  This is necessary so
1798          * that a PMD range in the page table    1796          * that a PMD range in the page table overlaps exactly with a PMD
1799          * range in the page cache.              1797          * range in the page cache.
1800          */                                      1798          */
1801         if ((vmf->pgoff & PG_PMD_COLOUR) !=      1799         if ((vmf->pgoff & PG_PMD_COLOUR) !=
1802             ((vmf->address >> PAGE_SHIFT) & P    1800             ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1803                 return true;                     1801                 return true;
1804                                                  1802 
1805         /* Fall back to PTEs if we're going t    1803         /* Fall back to PTEs if we're going to COW */
1806         if (write && !(vmf->vma->vm_flags & V    1804         if (write && !(vmf->vma->vm_flags & VM_SHARED))
1807                 return true;                     1805                 return true;
1808                                                  1806 
1809         /* If the PMD would extend outside th    1807         /* If the PMD would extend outside the VMA */
1810         if (pmd_addr < vmf->vma->vm_start)       1808         if (pmd_addr < vmf->vma->vm_start)
1811                 return true;                     1809                 return true;
1812         if ((pmd_addr + PMD_SIZE) > vmf->vma-    1810         if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
1813                 return true;                     1811                 return true;
1814                                                  1812 
1815         /* If the PMD would extend beyond the    1813         /* If the PMD would extend beyond the file size */
1816         if ((xas->xa_index | PG_PMD_COLOUR) >    1814         if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
1817                 return true;                     1815                 return true;
1818                                                  1816 
1819         return false;                            1817         return false;
1820 }                                                1818 }
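
The colour test above is why a DAX PMD mapping requires the faulting virtual address and the file offset to be congruent modulo the PMD size, so that one 2 MiB range of the file lines up with one 2 MiB range of the page table. A minimal userspace illustration of the same arithmetic (a sketch only: x86-64 constants are assumed, and PG_PMD_COLOUR is re-derived here rather than taken from the kernel's definition):

#include <stdio.h>

#define PAGE_SHIFT	12			/* 4 KiB pages */
#define PMD_SHIFT	21			/* 2 MiB PMD mappings */
#define PG_PMD_COLOUR	((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1)	/* 511 */

/*
 * Same test as dax_fault_check_fallback(): the file page index and the
 * virtual page index must agree in their low 9 bits for a PMD to be used.
 */
static int pmd_colour_matches(unsigned long pgoff, unsigned long address)
{
	return (pgoff & PG_PMD_COLOUR) ==
	       ((address >> PAGE_SHIFT) & PG_PMD_COLOUR);
}

int main(void)
{
	unsigned long addr = 0x600000;	/* a 2 MiB-aligned mapping address */

	printf("%d\n", pmd_colour_matches(512, addr));	/* 1: file offset 2 MiB */
	printf("%d\n", pmd_colour_matches(513, addr));	/* 0: off by one page */
	return 0;
}
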
1821                                                  1819 
1822 static vm_fault_t dax_iomap_pmd_fault(struct     1820 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1823                                const struct i    1821                                const struct iomap_ops *ops)
1824 {                                                1822 {
1825         struct address_space *mapping = vmf->    1823         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1826         XA_STATE_ORDER(xas, &mapping->i_pages    1824         XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
1827         struct iomap_iter iter = {               1825         struct iomap_iter iter = {
1828                 .inode          = mapping->ho    1826                 .inode          = mapping->host,
1829                 .len            = PMD_SIZE,      1827                 .len            = PMD_SIZE,
1830                 .flags          = IOMAP_DAX |    1828                 .flags          = IOMAP_DAX | IOMAP_FAULT,
1831         };                                       1829         };
1832         vm_fault_t ret = VM_FAULT_FALLBACK;      1830         vm_fault_t ret = VM_FAULT_FALLBACK;
1833         pgoff_t max_pgoff;                       1831         pgoff_t max_pgoff;
1834         void *entry;                             1832         void *entry;
                                                   >> 1833         int error;
1835                                                  1834 
1836         if (vmf->flags & FAULT_FLAG_WRITE)       1835         if (vmf->flags & FAULT_FLAG_WRITE)
1837                 iter.flags |= IOMAP_WRITE;       1836                 iter.flags |= IOMAP_WRITE;
1838                                                  1837 
1839         /*                                       1838         /*
1840          * Check whether offset isn't beyond     1839          * Check whether offset isn't beyond end of file now. Caller is
1841          * supposed to hold locks serializing    1840          * supposed to hold locks serializing us with truncate / punch hole so
1842          * this is a reliable test.              1841          * this is a reliable test.
1843          */                                      1842          */
1844         max_pgoff = DIV_ROUND_UP(i_size_read(    1843         max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);
1845                                                  1844 
1846         trace_dax_pmd_fault(iter.inode, vmf,     1845         trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);
1847                                                  1846 
1848         if (xas.xa_index >= max_pgoff) {         1847         if (xas.xa_index >= max_pgoff) {
1849                 ret = VM_FAULT_SIGBUS;           1848                 ret = VM_FAULT_SIGBUS;
1850                 goto out;                        1849                 goto out;
1851         }                                        1850         }
1852                                                  1851 
1853         if (dax_fault_check_fallback(vmf, &xa    1852         if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
1854                 goto fallback;                   1853                 goto fallback;
1855                                                  1854 
1856         /*                                       1855         /*
1857          * grab_mapping_entry() will make sur    1856          * grab_mapping_entry() will make sure we get an empty PMD entry,
1858          * a zero PMD entry or a DAX PMD.  If    1857          * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
1859          * entry is already in the array, for    1858          * entry is already in the array, for instance), it will return
1860          * VM_FAULT_FALLBACK.                    1859          * VM_FAULT_FALLBACK.
1861          */                                      1860          */
1862         entry = grab_mapping_entry(&xas, mapp    1861         entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
1863         if (xa_is_internal(entry)) {             1862         if (xa_is_internal(entry)) {
1864                 ret = xa_to_internal(entry);     1863                 ret = xa_to_internal(entry);
1865                 goto fallback;                   1864                 goto fallback;
1866         }                                        1865         }
1867                                                  1866 
1868         /*                                       1867         /*
1869          * It is possible, particularly with     1868          * It is possible, particularly with mixed reads & writes to private
1870          * mappings, that we have raced with     1869          * mappings, that we have raced with a PTE fault that overlaps with
1871          * the PMD we need to set up.  If so     1870          * the PMD we need to set up.  If so just return and the fault will be
1872          * retried.                              1871          * retried.
1873          */                                      1872          */
1874         if (!pmd_none(*vmf->pmd) && !pmd_tran    1873         if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1875                         !pmd_devmap(*vmf->pmd    1874                         !pmd_devmap(*vmf->pmd)) {
1876                 ret = 0;                         1875                 ret = 0;
1877                 goto unlock_entry;               1876                 goto unlock_entry;
1878         }                                        1877         }
1879                                                  1878 
1880         iter.pos = (loff_t)xas.xa_index << PA    1879         iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
1881         while (iomap_iter(&iter, ops) > 0) {  !! 1880         while ((error = iomap_iter(&iter, ops)) > 0) {
1882                 if (iomap_length(&iter) < PMD    1881                 if (iomap_length(&iter) < PMD_SIZE)
1883                         continue; /* actually    1882                         continue; /* actually breaks out of the loop */
1884                                                  1883 
1885                 ret = dax_fault_iter(vmf, &it    1884                 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
1886                 if (ret != VM_FAULT_FALLBACK)    1885                 if (ret != VM_FAULT_FALLBACK)
1887                         iter.processed = PMD_    1886                         iter.processed = PMD_SIZE;
1888         }                                        1887         }
1889                                                  1888 
1890 unlock_entry:                                    1889 unlock_entry:
1891         dax_unlock_entry(&xas, entry);           1890         dax_unlock_entry(&xas, entry);
1892 fallback:                                        1891 fallback:
1893         if (ret == VM_FAULT_FALLBACK) {          1892         if (ret == VM_FAULT_FALLBACK) {
1894                 split_huge_pmd(vmf->vma, vmf-    1893                 split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
1895                 count_vm_event(THP_FAULT_FALL    1894                 count_vm_event(THP_FAULT_FALLBACK);
1896         }                                        1895         }
1897 out:                                             1896 out:
1898         trace_dax_pmd_fault_done(iter.inode,     1897         trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
1899         return ret;                              1898         return ret;
1900 }                                                1899 }
1901 #else                                            1900 #else
1902 static vm_fault_t dax_iomap_pmd_fault(struct     1901 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1903                                const struct i    1902                                const struct iomap_ops *ops)
1904 {                                                1903 {
1905         return VM_FAULT_FALLBACK;                1904         return VM_FAULT_FALLBACK;
1906 }                                                1905 }
1907 #endif /* CONFIG_FS_DAX_PMD */                   1906 #endif /* CONFIG_FS_DAX_PMD */
1908                                                  1907 
1909 /**                                              1908 /**
1910  * dax_iomap_fault - handle a page fault on a    1909  * dax_iomap_fault - handle a page fault on a DAX file
1911  * @vmf: The description of the fault            1910  * @vmf: The description of the fault
1912  * @order: Order of the page to fault in      !! 1911  * @pe_size: Size of the page to fault in
1913  * @pfnp: PFN to insert for synchronous fault    1912  * @pfnp: PFN to insert for synchronous faults if fsync is required
1914  * @iomap_errp: Storage for detailed error co    1913  * @iomap_errp: Storage for detailed error code in case of error
1915  * @ops: Iomap ops passed from the file syste    1914  * @ops: Iomap ops passed from the file system
1916  *                                               1915  *
1917  * When a page fault occurs, filesystems may     1916  * When a page fault occurs, filesystems may call this helper in
1918  * their fault handler for DAX files. dax_iom    1917  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1919  * has done all the necessary locking for pag    1918  * has done all the necessary locking for page fault to proceed
1920  * successfully.                                 1919  * successfully.
1921  */                                              1920  */
1959 1922 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,       !! 1921 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1960 1923                     pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)    1922                     pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1924 {                                                1923 {
1925         if (order == 0)                       !! 1924         switch (pe_size) {
                                                   >> 1925         case PE_SIZE_PTE:
1926                 return dax_iomap_pte_fault(vm    1926                 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1927         else if (order == PMD_ORDER)          !! 1927         case PE_SIZE_PMD:
1928                 return dax_iomap_pmd_fault(vm    1928                 return dax_iomap_pmd_fault(vmf, pfnp, ops);
1929         else                                  !! 1929         default:
1930                 return VM_FAULT_FALLBACK;        1930                 return VM_FAULT_FALLBACK;
                                                   >> 1931         }
1931 }                                                1932 }
1932 EXPORT_SYMBOL_GPL(dax_iomap_fault);              1933 EXPORT_SYMBOL_GPL(dax_iomap_fault);
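
As the kernel-doc above notes, a filesystem calls dax_iomap_fault() from its own fault handlers after taking whatever locks it needs against truncate and hole punching. Below is a rough sketch of that wiring for a 6.12-era kernel, loosely modeled on the ext4/XFS DAX handlers; the myfs_* names and myfs_iomap_ops are placeholders, not part of fs/dax.c, and the MAP_SYNC completion step is shown separately after dax_finish_sync_fault() further down.

#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>

/* Provided elsewhere by the filesystem; placeholder for this sketch. */
extern const struct iomap_ops myfs_iomap_ops;

static vm_fault_t myfs_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	vm_fault_t ret;
	pfn_t pfn;	/* only consumed by the MAP_SYNC path, see below */

	if (write) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}
	/* Serialize against truncate/hole punch, as dax_iomap_fault() expects. */
	filemap_invalidate_lock_shared(inode->i_mapping);
	ret = dax_iomap_fault(vmf, order, &pfn, NULL, &myfs_iomap_ops);
	filemap_invalidate_unlock_shared(inode->i_mapping);

	if (write)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static vm_fault_t myfs_dax_fault(struct vm_fault *vmf)
{
	return myfs_dax_huge_fault(vmf, 0);	/* order 0: a single PTE */
}

static const struct vm_operations_struct myfs_dax_vm_ops = {
	.fault		= myfs_dax_fault,
	.huge_fault	= myfs_dax_huge_fault,	/* called with PMD_ORDER, etc. */
	.page_mkwrite	= myfs_dax_fault,
	.pfn_mkwrite	= myfs_dax_fault,
};
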
1933                                                  1934 
1934 /*                                               1935 /*
1935  * dax_insert_pfn_mkwrite - insert PTE or PMD    1936  * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1936  * @vmf: The description of the fault            1937  * @vmf: The description of the fault
1937  * @pfn: PFN to insert                           1938  * @pfn: PFN to insert
1938  * @order: Order of entry to insert.             1939  * @order: Order of entry to insert.
1939  *                                               1940  *
1940  * This function inserts a writeable PTE or P    1941  * This function inserts a writeable PTE or PMD entry into the page tables
1941  * for an mmaped DAX file.  It also marks the    1942  * for an mmaped DAX file.  It also marks the page cache entry as dirty.
1942  */                                              1943  */
1943 static vm_fault_t                                1944 static vm_fault_t
1944 dax_insert_pfn_mkwrite(struct vm_fault *vmf,     1945 dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1945 {                                                1946 {
1946         struct address_space *mapping = vmf->    1947         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1947         XA_STATE_ORDER(xas, &mapping->i_pages    1948         XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1948         void *entry;                             1949         void *entry;
1949         vm_fault_t ret;                          1950         vm_fault_t ret;
1950                                                  1951 
1951         xas_lock_irq(&xas);                      1952         xas_lock_irq(&xas);
1952         entry = get_unlocked_entry(&xas, orde    1953         entry = get_unlocked_entry(&xas, order);
1953         /* Did we race with someone splitting    1954         /* Did we race with someone splitting entry or so? */
1954         if (!entry || dax_is_conflict(entry)     1955         if (!entry || dax_is_conflict(entry) ||
1955             (order == 0 && !dax_is_pte_entry(    1956             (order == 0 && !dax_is_pte_entry(entry))) {
1956                 put_unlocked_entry(&xas, entr    1957                 put_unlocked_entry(&xas, entry, WAKE_NEXT);
1957                 xas_unlock_irq(&xas);            1958                 xas_unlock_irq(&xas);
1958                 trace_dax_insert_pfn_mkwrite_    1959                 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1959                                                  1960                                                       VM_FAULT_NOPAGE);
1960                 return VM_FAULT_NOPAGE;          1961                 return VM_FAULT_NOPAGE;
1961         }                                        1962         }
1962         xas_set_mark(&xas, PAGECACHE_TAG_DIRT    1963         xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
1963         dax_lock_entry(&xas, entry);             1964         dax_lock_entry(&xas, entry);
1964         xas_unlock_irq(&xas);                    1965         xas_unlock_irq(&xas);
1965         if (order == 0)                          1966         if (order == 0)
1966                 ret = vmf_insert_mixed_mkwrit    1967                 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1967 #ifdef CONFIG_FS_DAX_PMD                         1968 #ifdef CONFIG_FS_DAX_PMD
1968         else if (order == PMD_ORDER)             1969         else if (order == PMD_ORDER)
1969                 ret = vmf_insert_pfn_pmd(vmf,    1970                 ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
1970 #endif                                           1971 #endif
1971         else                                     1972         else
1972                 ret = VM_FAULT_FALLBACK;         1973                 ret = VM_FAULT_FALLBACK;
1973         dax_unlock_entry(&xas, entry);           1974         dax_unlock_entry(&xas, entry);
1974         trace_dax_insert_pfn_mkwrite(mapping-    1975         trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1975         return ret;                              1976         return ret;
1976 }                                                1977 }
1977                                                  1978 
1978 /**                                              1979 /**
1979  * dax_finish_sync_fault - finish synchronous    1980  * dax_finish_sync_fault - finish synchronous page fault
1980  * @vmf: The description of the fault            1981  * @vmf: The description of the fault
1981  * @order: Order of entry to be inserted      !! 1982  * @pe_size: Size of entry to be inserted
1982  * @pfn: PFN to insert                           1983  * @pfn: PFN to insert
1983  *                                               1984  *
1984  * This function ensures that the file range     1985  * This function ensures that the file range touched by the page fault is
1985  * stored persistently on the media and handl    1986  * stored persistently on the media and handles inserting of appropriate page
1986  * table entry.                                  1987  * table entry.
1987  */                                              1988  */
2027 1988 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order, !! 1989 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
2028 1989                 pfn_t pfn)                                                  !! 1990                 enum page_entry_size pe_size, pfn_t pfn)
1990 {                                                1991 {
1991         int err;                                 1992         int err;
1992         loff_t start = ((loff_t)vmf->pgoff) <    1993         loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
                                                   >> 1994         unsigned int order = pe_order(pe_size);
1993         size_t len = PAGE_SIZE << order;         1995         size_t len = PAGE_SIZE << order;
1994                                                  1996 
1995         err = vfs_fsync_range(vmf->vma->vm_fi    1997         err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1996         if (err)                                 1998         if (err)
1997                 return VM_FAULT_SIGBUS;          1999                 return VM_FAULT_SIGBUS;
1998         return dax_insert_pfn_mkwrite(vmf, pf    2000         return dax_insert_pfn_mkwrite(vmf, pfn, order);
1999 }                                                2001 }
2000 EXPORT_SYMBOL_GPL(dax_finish_sync_fault);        2002 EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
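
Caller-side view of the helper above: for a write fault on a MAP_SYNC mapping, dax_iomap_fault() stops short of installing a writeable entry, fills the pfn, and returns VM_FAULT_NEEDDSYNC; the filesystem then calls dax_finish_sync_fault(), which fsyncs the faulted range and inserts the PTE/PMD. A hedged continuation of the earlier myfs_* sketch (same placeholder names, locking elided):

static vm_fault_t myfs_dax_sync_fault(struct vm_fault *vmf, unsigned int order)
{
	pfn_t pfn;
	vm_fault_t ret;

	ret = dax_iomap_fault(vmf, order, &pfn, NULL, &myfs_iomap_ops);

	/* Persist metadata for the faulted range, then map it writeable. */
	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, order, pfn);
	return ret;
}
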
2001                                                  2003 
2002 static loff_t dax_range_compare_iter(struct i    2004 static loff_t dax_range_compare_iter(struct iomap_iter *it_src,
2003                 struct iomap_iter *it_dest, u    2005                 struct iomap_iter *it_dest, u64 len, bool *same)
2004 {                                                2006 {
2005         const struct iomap *smap = &it_src->i    2007         const struct iomap *smap = &it_src->iomap;
2006         const struct iomap *dmap = &it_dest->    2008         const struct iomap *dmap = &it_dest->iomap;
2007         loff_t pos1 = it_src->pos, pos2 = it_    2009         loff_t pos1 = it_src->pos, pos2 = it_dest->pos;
2008         void *saddr, *daddr;                     2010         void *saddr, *daddr;
2009         int id, ret;                             2011         int id, ret;
2010                                                  2012 
2011         len = min(len, min(smap->length, dmap    2013         len = min(len, min(smap->length, dmap->length));
2012                                                  2014 
2013         if (smap->type == IOMAP_HOLE && dmap-    2015         if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) {
2014                 *same = true;                    2016                 *same = true;
2015                 return len;                      2017                 return len;
2016         }                                        2018         }
2017                                                  2019 
2018         if (smap->type == IOMAP_HOLE || dmap-    2020         if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) {
2019                 *same = false;                   2021                 *same = false;
2020                 return 0;                        2022                 return 0;
2021         }                                        2023         }
2022                                                  2024 
2023         id = dax_read_lock();                    2025         id = dax_read_lock();
2024         ret = dax_iomap_direct_access(smap, p    2026         ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE),
2025                                       &saddr,    2027                                       &saddr, NULL);
2026         if (ret < 0)                             2028         if (ret < 0)
2027                 goto out_unlock;                 2029                 goto out_unlock;
2028                                                  2030 
2029         ret = dax_iomap_direct_access(dmap, p    2031         ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE),
2030                                       &daddr,    2032                                       &daddr, NULL);
2031         if (ret < 0)                             2033         if (ret < 0)
2032                 goto out_unlock;                 2034                 goto out_unlock;
2033                                                  2035 
2034         *same = !memcmp(saddr, daddr, len);      2036         *same = !memcmp(saddr, daddr, len);
2035         if (!*same)                              2037         if (!*same)
2036                 len = 0;                         2038                 len = 0;
2037         dax_read_unlock(id);                     2039         dax_read_unlock(id);
2038         return len;                              2040         return len;
2039                                                  2041 
2040 out_unlock:                                      2042 out_unlock:
2041         dax_read_unlock(id);                     2043         dax_read_unlock(id);
2042         return -EIO;                             2044         return -EIO;
2043 }                                                2045 }
2044                                                  2046 
2045 int dax_dedupe_file_range_compare(struct inod    2047 int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
2046                 struct inode *dst, loff_t dst    2048                 struct inode *dst, loff_t dstoff, loff_t len, bool *same,
2047                 const struct iomap_ops *ops)     2049                 const struct iomap_ops *ops)
2048 {                                                2050 {
2049         struct iomap_iter src_iter = {           2051         struct iomap_iter src_iter = {
2050                 .inode          = src,           2052                 .inode          = src,
2051                 .pos            = srcoff,        2053                 .pos            = srcoff,
2052                 .len            = len,           2054                 .len            = len,
2053                 .flags          = IOMAP_DAX,     2055                 .flags          = IOMAP_DAX,
2054         };                                       2056         };
2055         struct iomap_iter dst_iter = {           2057         struct iomap_iter dst_iter = {
2056                 .inode          = dst,           2058                 .inode          = dst,
2057                 .pos            = dstoff,        2059                 .pos            = dstoff,
2058                 .len            = len,           2060                 .len            = len,
2059                 .flags          = IOMAP_DAX,     2061                 .flags          = IOMAP_DAX,
2060         };                                       2062         };
2061         int ret, compared = 0;                   2063         int ret, compared = 0;
2062                                                  2064 
2063         while ((ret = iomap_iter(&src_iter, o    2065         while ((ret = iomap_iter(&src_iter, ops)) > 0 &&
2064                (ret = iomap_iter(&dst_iter, o    2066                (ret = iomap_iter(&dst_iter, ops)) > 0) {
2065                 compared = dax_range_compare_    2067                 compared = dax_range_compare_iter(&src_iter, &dst_iter,
2066                                 min(src_iter.    2068                                 min(src_iter.len, dst_iter.len), same);
2067                 if (compared < 0)                2069                 if (compared < 0)
2068                         return ret;              2070                         return ret;
2069                 src_iter.processed = dst_iter    2071                 src_iter.processed = dst_iter.processed = compared;
2070         }                                        2072         }
2071         return ret;                              2073         return ret;
2072 }                                                2074 }
2073                                                  2075 
2074 int dax_remap_file_range_prep(struct file *fi    2076 int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
2075                               struct file *fi    2077                               struct file *file_out, loff_t pos_out,
2076                               loff_t *len, un    2078                               loff_t *len, unsigned int remap_flags,
2077                               const struct io    2079                               const struct iomap_ops *ops)
2078 {                                                2080 {
2079         return __generic_remap_file_range_pre    2081         return __generic_remap_file_range_prep(file_in, pos_in, file_out,
2080                                                  2082                                                pos_out, len, remap_flags, ops);
2081 }                                                2083 }
2082 EXPORT_SYMBOL_GPL(dax_remap_file_range_prep);    2084 EXPORT_SYMBOL_GPL(dax_remap_file_range_prep);
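
dax_remap_file_range_prep() is the DAX-aware counterpart of generic_remap_file_range_prep(): by passing the filesystem's read iomap ops it lets a dedupe request compare the byte ranges through the DAX mappings (dax_dedupe_file_range_compare() above) instead of through the page cache. A hedged sketch of how a reflink-capable filesystem might choose between the two, loosely modeled on XFS; myfs_read_iomap_ops and the myfs_* name are placeholders:

#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/iomap.h>

/* Placeholder: the filesystem's read-only iomap ops, defined elsewhere. */
extern const struct iomap_ops myfs_read_iomap_ops;

static int myfs_remap_range_prep(struct file *file_in, loff_t pos_in,
				 struct file *file_out, loff_t pos_out,
				 loff_t *len, unsigned int remap_flags)
{
	/* A real filesystem also rejects mixed DAX/non-DAX file pairs. */
	if (IS_DAX(file_inode(file_in)) && IS_DAX(file_inode(file_out)))
		return dax_remap_file_range_prep(file_in, pos_in,
				file_out, pos_out, len, remap_flags,
				&myfs_read_iomap_ops);

	return generic_remap_file_range_prep(file_in, pos_in,
				file_out, pos_out, len, remap_flags);
}
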
2083                                                  2085 
