// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <linux/rmap.h>
#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset.  */
#define PG_PMD_COLOUR   ((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR       (PMD_SIZE >> PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
        int i;

        for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
                init_waitqueue_head(wait_table + i);
        return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define DAX_SHIFT       (4)
#define DAX_LOCKED      (1UL << 0)
#define DAX_PMD         (1UL << 1)
#define DAX_ZERO_PAGE   (1UL << 2)
#define DAX_EMPTY       (1UL << 3)

static unsigned long dax_to_pfn(void *entry)
{
        return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
        return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}
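
/*
 * Illustrative encoding (example values, not from the source): a PTE-sized
 * entry for pfn 0x1234 is stored as xa_mk_value(0x1234 << DAX_SHIFT), i.e.
 * the value 0x12340; DAX_PMD, DAX_LOCKED etc. just set the corresponding
 * low bits.  dax_to_pfn() recovers the pfn by shifting those flag bits
 * back out.
 */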

static bool dax_is_locked(void *entry)
{
        return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
        if (xa_to_value(entry) & DAX_PMD)
                return PMD_ORDER;
        return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
        return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
        return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
        return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
        return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * true if the entry that was found is of a smaller order than the entry
 * we were looking for
 */
static bool dax_is_conflict(void *entry)
{
        return entry == XA_RETRY_ENTRY;
}
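
/*
 * Note: XA_RETRY_ENTRY is an XArray-internal entry that can never be stored
 * as a real DAX entry, which makes it a safe in-band sentinel for "found an
 * entry of smaller order than requested" here and in get_unlocked_entry().
 */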

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
        struct xarray *xa;
        pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
        wait_queue_entry_t wait;
        struct exceptional_entry_key key;
};

/**
 * enum dax_wake_mode: waitqueue wakeup behaviour
 * @WAKE_ALL: wake all waiters in the waitqueue
 * @WAKE_NEXT: wake only the first waiter in the waitqueue
 */
enum dax_wake_mode {
        WAKE_ALL,
        WAKE_NEXT,
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
                void *entry, struct exceptional_entry_key *key)
{
        unsigned long hash;
        unsigned long index = xas->xa_index;

        /*
         * If 'entry' is a PMD, align the 'index' that we use for the wait
         * queue to the start of that PMD.  This ensures that all offsets in
         * the range covered by the PMD map to the same bit lock.
         */
        if (dax_is_pmd_entry(entry))
                index &= ~PG_PMD_COLOUR;
        key->xa = xas->xa;
        key->entry_start = index;

        hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
        return wait_table + hash;
}
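
/*
 * Hashing the xarray pointer XOR'd with the (PMD-aligned) index spreads
 * unrelated waiters across the 4096-entry wait table, so contention on one
 * file offset does not serialize lockers of other entries; the key is then
 * matched exactly in wake_exceptional_entry_func() below to filter out
 * hash collisions.
 */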

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
                unsigned int mode, int sync, void *keyp)
{
        struct exceptional_entry_key *key = keyp;
        struct wait_exceptional_entry_queue *ewait =
                container_of(wait, struct wait_exceptional_entry_queue, wait);

        if (key->xa != ewait->key.xa ||
            key->entry_start != ewait->key.entry_start)
                return 0;
        return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry,
                           enum dax_wake_mode mode)
{
        struct exceptional_entry_key key;
        wait_queue_head_t *wq;

        wq = dax_entry_waitqueue(xas, entry, &key);

        /*
         * Checking for locked entry and prepare_to_wait_exclusive() happens
         * under the i_pages lock, ditto for entry handling in our callers.
         * So at this point all tasks that could have seen our entry locked
         * must be in the waitqueue and the following check will see them.
         */
        if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
}

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.  The entry returned may have a larger order than @order.
 * If @order is larger than the order of the entry found in i_pages, this
 * function returns a dax_is_conflict entry.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
{
        void *entry;
        struct wait_exceptional_entry_queue ewait;
        wait_queue_head_t *wq;

        init_wait(&ewait.wait);
        ewait.wait.func = wake_exceptional_entry_func;

        for (;;) {
                entry = xas_find_conflict(xas);
                if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
                        return entry;
                if (dax_entry_order(entry) < order)
                        return XA_RETRY_ENTRY;
                if (!dax_is_locked(entry))
                        return entry;

                wq = dax_entry_waitqueue(xas, entry, &ewait.key);
                prepare_to_wait_exclusive(wq, &ewait.wait,
                                          TASK_UNINTERRUPTIBLE);
                xas_unlock_irq(xas);
                xas_reset(xas);
                schedule();
                finish_wait(wq, &ewait.wait);
                xas_lock_irq(xas);
        }
}
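
/*
 * The loop above implements the usual sleeping-lock protocol: queue
 * exclusively on the hashed waitqueue, drop the i_pages lock, sleep until
 * the holder calls dax_wake_entry(), then retake the lock and re-walk from
 * scratch (xas_reset() invalidates the cached walk state, which may be
 * stale once the lock was dropped).  Exclusive waits pair with WAKE_NEXT
 * wakeups so only one waiter retries per unlock.
 */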

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages).
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
        struct wait_exceptional_entry_queue ewait;
        wait_queue_head_t *wq;

        init_wait(&ewait.wait);
        ewait.wait.func = wake_exceptional_entry_func;

        wq = dax_entry_waitqueue(xas, entry, &ewait.key);
        /*
         * Unlike get_unlocked_entry() there is no guarantee that this
         * path ever successfully retrieves an unlocked entry before an
         * inode dies. Perform a non-exclusive wait in case this path
         * never successfully performs its own wake up.
         */
        prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
        xas_unlock_irq(xas);
        schedule();
        finish_wait(wq, &ewait.wait);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry,
                               enum dax_wake_mode mode)
{
        if (entry && !dax_is_conflict(entry))
                dax_wake_entry(xas, entry, mode);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
        void *old;

        BUG_ON(dax_is_locked(entry));
        xas_reset(xas);
        xas_lock_irq(xas);
        old = xas_store(xas, entry);
        xas_unlock_irq(xas);
        BUG_ON(!dax_is_locked(old));
        dax_wake_entry(xas, entry, WAKE_NEXT);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
        unsigned long v = xa_to_value(entry);
        return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}
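
/*
 * Typical lock/unlock sequence (illustrative; mirrors callers such as
 * __dax_invalidate_entry() and dax_writeback_one() below):
 *
 *      xas_lock_irq(&xas);
 *      entry = get_unlocked_entry(&xas, 0);    // may sleep and re-lock
 *      dax_lock_entry(&xas, entry);            // store entry | DAX_LOCKED
 *      xas_unlock_irq(&xas);
 *      ... work while only the entry lock is held ...
 *      dax_unlock_entry(&xas, entry);          // re-takes i_pages lock itself
 */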

static unsigned long dax_entry_size(void *entry)
{
        if (dax_is_zero_entry(entry))
                return 0;
        else if (dax_is_empty_entry(entry))
                return 0;
        else if (dax_is_pmd_entry(entry))
                return PMD_SIZE;
        else
                return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
        return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
        for (pfn = dax_to_pfn(entry); \
                        pfn < dax_end_pfn(entry); pfn++)
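
/*
 * For zero/empty entries dax_entry_size() is 0, so dax_end_pfn() equals
 * dax_to_pfn() and the loop body never runs; a PTE entry visits one pfn
 * and a PMD entry visits PMD_SIZE / PAGE_SIZE of them (512 with 4K pages
 * and 2M PMDs, e.g. on x86-64).
 */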

static inline bool dax_page_is_shared(struct page *page)
{
        return page->mapping == PAGE_MAPPING_DAX_SHARED;
}

/*
 * Set page->mapping to PAGE_MAPPING_DAX_SHARED and increase the
 * refcount.
 */
static inline void dax_page_share_get(struct page *page)
{
        if (page->mapping != PAGE_MAPPING_DAX_SHARED) {
                /*
                 * Reset the share count (which aliases page->index) if
                 * the page was already mapped regularly before.
                 */
                if (page->mapping)
                        page->share = 1;
                page->mapping = PAGE_MAPPING_DAX_SHARED;
        }
        page->share++;
}

static inline unsigned long dax_page_share_put(struct page *page)
{
        return --page->share;
}

/*
 * When called from dax_insert_entry(), the shared flag indicates whether
 * this entry is shared by multiple files.  If so, set page->mapping to
 * PAGE_MAPPING_DAX_SHARED and use page->share as the refcount.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
                struct vm_area_struct *vma, unsigned long address, bool shared)
{
        unsigned long size = dax_entry_size(entry), pfn, index;
        int i = 0;

        if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
                return;

        index = linear_page_index(vma, address & ~(size - 1));
        for_each_mapped_pfn(entry, pfn) {
                struct page *page = pfn_to_page(pfn);

                if (shared) {
                        dax_page_share_get(page);
                } else {
                        WARN_ON_ONCE(page->mapping);
                        page->mapping = mapping;
                        page->index = index + i++;
                }
        }
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
                bool trunc)
{
        unsigned long pfn;

        if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
                return;

        for_each_mapped_pfn(entry, pfn) {
                struct page *page = pfn_to_page(pfn);

                WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
                if (dax_page_is_shared(page)) {
                        /* keep the shared flag if this page is still shared */
                        if (dax_page_share_put(page) > 0)
                                continue;
                } else
                        WARN_ON_ONCE(page->mapping && page->mapping != mapping);
                page->mapping = NULL;
                page->index = 0;
        }
}

static struct page *dax_busy_page(void *entry)
{
        unsigned long pfn;

        for_each_mapped_pfn(entry, pfn) {
                struct page *page = pfn_to_page(pfn);

                if (page_ref_count(page) > 1)
                        return page;
        }
        return NULL;
}
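
/*
 * A ref count of exactly 1 is the idle state for a ZONE_DEVICE page (see
 * the comment above dax_layout_busy_page_range() below), so any count > 1
 * means someone, e.g. get_user_pages() or DMA, still holds a pin and the
 * entry is "busy".
 */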

/**
 * dax_lock_folio - Lock the DAX entry corresponding to a folio
 * @folio: The folio whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_folio() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_folio(struct folio *folio)
{
        XA_STATE(xas, NULL, 0);
        void *entry;

        /* Ensure folio->mapping isn't freed while we look at it */
        rcu_read_lock();
        for (;;) {
                struct address_space *mapping = READ_ONCE(folio->mapping);

                entry = NULL;
                if (!mapping || !dax_mapping(mapping))
                        break;

                /*
                 * In the device-dax case there's no need to lock, a
                 * struct dev_pagemap pin is sufficient to keep the
                 * inode alive, and we assume we have dev_pagemap pin
                 * otherwise we would not have a valid pfn_to_page()
                 * translation.
                 */
                entry = (void *)~0UL;
                if (S_ISCHR(mapping->host->i_mode))
                        break;

                xas.xa = &mapping->i_pages;
                xas_lock_irq(&xas);
                if (mapping != folio->mapping) {
                        xas_unlock_irq(&xas);
                        continue;
                }
                xas_set(&xas, folio->index);
                entry = xas_load(&xas);
                if (dax_is_locked(entry)) {
                        rcu_read_unlock();
                        wait_entry_unlocked(&xas, entry);
                        rcu_read_lock();
                        continue;
                }
                dax_lock_entry(&xas, entry);
                xas_unlock_irq(&xas);
                break;
        }
        rcu_read_unlock();
        return (dax_entry_t)entry;
}
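
/*
 * Cookie semantics: 0 means the folio is not (or no longer) DAX-mapped and
 * nothing was locked, ~0UL marks the device-dax case where no locking is
 * needed, and any other value is the locked entry itself, to be handed
 * back to dax_unlock_folio() below.
 */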

void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
{
        struct address_space *mapping = folio->mapping;
        XA_STATE(xas, &mapping->i_pages, folio->index);

        if (S_ISCHR(mapping->host->i_mode))
                return;

        dax_unlock_entry(&xas, (void *)cookie);
}

/*
 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
 * @mapping: the file's mapping whose entry we want to lock
 * @index: the offset within this file
 * @page: output the dax page corresponding to this dax entry
 *
 * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
 * could not be locked.
 */
dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
                struct page **page)
{
        XA_STATE(xas, NULL, 0);
        void *entry;

        rcu_read_lock();
        for (;;) {
                entry = NULL;
                if (!dax_mapping(mapping))
                        break;

                xas.xa = &mapping->i_pages;
                xas_lock_irq(&xas);
                xas_set(&xas, index);
                entry = xas_load(&xas);
                if (dax_is_locked(entry)) {
                        rcu_read_unlock();
                        wait_entry_unlocked(&xas, entry);
                        rcu_read_lock();
                        continue;
                }
                if (!entry ||
                    dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
                        /*
                         * Because we are looking up the entry by the file's
                         * mapping and index, it may not have been inserted
                         * yet, or may be a zero/empty entry.  We don't
                         * consider this an error, so return a special value
                         * and do not output @page.
                         */
                        entry = (void *)~0UL;
                } else {
                        *page = pfn_to_page(dax_to_pfn(entry));
                        dax_lock_entry(&xas, entry);
                }
                xas_unlock_irq(&xas);
                break;
        }
        rcu_read_unlock();
        return (dax_entry_t)entry;
}

void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
                dax_entry_t cookie)
{
        XA_STATE(xas, &mapping->i_pages, index);

        if (cookie == ~0UL)
                return;

        dax_unlock_entry(&xas, (void *)cookie);
}

/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
                struct address_space *mapping, unsigned int order)
{
        unsigned long index = xas->xa_index;
        bool pmd_downgrade;     /* splitting PMD entry into PTE entries? */
        void *entry;

retry:
        pmd_downgrade = false;
        xas_lock_irq(xas);
        entry = get_unlocked_entry(xas, order);

        if (entry) {
                if (dax_is_conflict(entry))
                        goto fallback;
                if (!xa_is_value(entry)) {
                        xas_set_err(xas, -EIO);
                        goto out_unlock;
                }

                if (order == 0) {
                        if (dax_is_pmd_entry(entry) &&
                            (dax_is_zero_entry(entry) ||
                             dax_is_empty_entry(entry))) {
                                pmd_downgrade = true;
                        }
                }
        }

        if (pmd_downgrade) {
                /*
                 * Make sure 'entry' remains valid while we drop
                 * the i_pages lock.
                 */
                dax_lock_entry(xas, entry);

                /*
                 * Besides huge zero pages the only other thing that gets
                 * downgraded are empty entries which don't need to be
                 * unmapped.
                 */
                if (dax_is_zero_entry(entry)) {
                        xas_unlock_irq(xas);
                        unmap_mapping_pages(mapping,
                                        xas->xa_index & ~PG_PMD_COLOUR,
                                        PG_PMD_NR, false);
                        xas_reset(xas);
                        xas_lock_irq(xas);
                }

                dax_disassociate_entry(entry, mapping, false);
                xas_store(xas, NULL);   /* undo the PMD join */
                dax_wake_entry(xas, entry, WAKE_ALL);
                mapping->nrpages -= PG_PMD_NR;
                entry = NULL;
                xas_set(xas, index);
        }

        if (entry) {
                dax_lock_entry(xas, entry);
        } else {
                unsigned long flags = DAX_EMPTY;

                if (order > 0)
                        flags |= DAX_PMD;
                entry = dax_make_entry(pfn_to_pfn_t(0), flags);
                dax_lock_entry(xas, entry);
                if (xas_error(xas))
                        goto out_unlock;
                mapping->nrpages += 1UL << order;
        }

out_unlock:
        xas_unlock_irq(xas);
        if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
                goto retry;
        if (xas->xa_node == XA_ERROR(-ENOMEM))
                return xa_mk_internal(VM_FAULT_OOM);
        if (xas_error(xas))
                return xa_mk_internal(VM_FAULT_SIGBUS);
        return entry;
fallback:
        xas_unlock_irq(xas);
        return xa_mk_internal(VM_FAULT_FALLBACK);
}
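
/*
 * A caller therefore distinguishes success from failure roughly like this
 * (illustrative; the fault handlers later in this file follow the pattern):
 *
 *      entry = grab_mapping_entry(&xas, mapping, 0);
 *      if (xa_is_internal(entry))
 *              return xa_to_internal(entry);   // a vm_fault_t code
 */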

/**
 * dax_layout_busy_page_range - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 * @start: Starting offset. Page containing 'start' is included.
 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
 *       pages from 'start' till the end of file are included.
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page_range(struct address_space *mapping,
                                        loff_t start, loff_t end)
{
        void *entry;
        unsigned int scanned = 0;
        struct page *page = NULL;
        pgoff_t start_idx = start >> PAGE_SHIFT;
        pgoff_t end_idx;
        XA_STATE(xas, &mapping->i_pages, start_idx);

        /*
         * In the 'limited' case get_user_pages() for dax is disabled.
         */
        if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
                return NULL;

        if (!dax_mapping(mapping) || !mapping_mapped(mapping))
                return NULL;

        /* If end == LLONG_MAX, all pages from start till end of file */
        if (end == LLONG_MAX)
                end_idx = ULONG_MAX;
        else
                end_idx = end >> PAGE_SHIFT;
        /*
         * If we race get_user_pages_fast() here either we'll see the
         * elevated page count in the iteration and wait, or
         * get_user_pages_fast() will see that the page it took a reference
         * against is no longer mapped in the page tables and bail to the
         * get_user_pages() slow path.  The slow path is protected by
         * pte_lock() and pmd_lock(). New references are not taken without
         * holding those locks, and unmap_mapping_pages() will not zero the
         * pte or pmd without holding the respective lock, so we are
         * guaranteed to either see new references or prevent new
         * references from being established.
         */
        unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);

        xas_lock_irq(&xas);
        xas_for_each(&xas, entry, end_idx) {
                if (WARN_ON_ONCE(!xa_is_value(entry)))
                        continue;
                if (unlikely(dax_is_locked(entry)))
                        entry = get_unlocked_entry(&xas, 0);
                if (entry)
                        page = dax_busy_page(entry);
                put_unlocked_entry(&xas, entry, WAKE_NEXT);
                if (page)
                        break;
                if (++scanned % XA_CHECK_SCHED)
                        continue;

                xas_pause(&xas);
                xas_unlock_irq(&xas);
                cond_resched();
                xas_lock_irq(&xas);
        }
        xas_unlock_irq(&xas);
        return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);
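
/*
 * The `++scanned % XA_CHECK_SCHED` dance above (and in the other scan
 * loops below) periodically pauses the walk, drops the i_pages lock and
 * calls cond_resched() so a scan over a huge mapping does not hog the CPU
 * with the lock held; xas_pause() lets the walk resume safely afterwards.
 */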

struct page *dax_layout_busy_page(struct address_space *mapping)
{
        return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);

static int __dax_invalidate_entry(struct address_space *mapping,
                                          pgoff_t index, bool trunc)
{
        XA_STATE(xas, &mapping->i_pages, index);
        int ret = 0;
        void *entry;

        xas_lock_irq(&xas);
        entry = get_unlocked_entry(&xas, 0);
        if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
                goto out;
        if (!trunc &&
            (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
             xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
                goto out;
        dax_disassociate_entry(entry, mapping, trunc);
        xas_store(&xas, NULL);
        mapping->nrpages -= 1UL << dax_entry_order(entry);
        ret = 1;
out:
        put_unlocked_entry(&xas, entry, WAKE_ALL);
        xas_unlock_irq(&xas);
        return ret;
}

static int __dax_clear_dirty_range(struct address_space *mapping,
                pgoff_t start, pgoff_t end)
{
        XA_STATE(xas, &mapping->i_pages, start);
        unsigned int scanned = 0;
        void *entry;

        xas_lock_irq(&xas);
        xas_for_each(&xas, entry, end) {
                entry = get_unlocked_entry(&xas, 0);
                xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
                xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
                put_unlocked_entry(&xas, entry, WAKE_NEXT);

                if (++scanned % XA_CHECK_SCHED)
                        continue;

                xas_pause(&xas);
                xas_unlock_irq(&xas);
                cond_resched();
                xas_lock_irq(&xas);
        }
        xas_unlock_irq(&xas);

        return 0;
}

/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
        int ret = __dax_invalidate_entry(mapping, index, true);

        /*
         * This gets called from the truncate / punch_hole path. As such, the
         * caller must hold locks protecting against concurrent modifications
         * of the page cache (usually fs-private i_mmap_sem for writing). Since
         * the caller has seen a DAX entry for this index, we better find it
         * at that index as well...
         */
        WARN_ON_ONCE(!ret);
        return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
                                      pgoff_t index)
{
        return __dax_invalidate_entry(mapping, index, false);
}

static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
{
        return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
}

static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
{
        pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
        void *vto, *kaddr;
        long rc;
        int id;

        id = dax_read_lock();
        rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
                                &kaddr, NULL);
        if (rc < 0) {
                dax_read_unlock(id);
                return rc;
        }
        vto = kmap_atomic(vmf->cow_page);
        copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
        kunmap_atomic(vto);
        dax_read_unlock(id);
        return 0;
}
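
/*
 * dax_read_lock()/dax_read_unlock() bracket every dax_direct_access() call
 * in this file; it is (roughly) an srcu read-side section that keeps the
 * dax_device from being disabled or torn down while we dereference the
 * kernel address it handed back.
 */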

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
                struct vm_area_struct *vma)
{
        return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
                (iter->iomap.flags & IOMAP_F_DIRTY);
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
                const struct iomap_iter *iter, void *entry, pfn_t pfn,
                unsigned long flags)
{
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        void *new_entry = dax_make_entry(pfn, flags);
        bool write = iter->flags & IOMAP_WRITE;
        bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);
        bool shared = iter->iomap.flags & IOMAP_F_SHARED;

        if (dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
                unsigned long index = xas->xa_index;
                /* we are replacing a zero page with block mapping */
                if (dax_is_pmd_entry(entry))
                        unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
                                        PG_PMD_NR, false);
                else /* pte entry */
                        unmap_mapping_pages(mapping, index, 1, false);
        }

        xas_reset(xas);
        xas_lock_irq(xas);
        if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
                void *old;

                dax_disassociate_entry(entry, mapping, false);
                dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
                                shared);
                /*
                 * Only swap our new entry into the page cache if the current
                 * entry is a zero page or an empty entry.  If a normal PTE or
                 * PMD entry is already in the cache, we leave it alone.  This
                 * means that if we are trying to insert a PTE and the
                 * existing entry is a PMD, we will just leave the PMD in the
                 * tree and dirty it if necessary.
                 */
                old = dax_lock_entry(xas, new_entry);
                WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
                                        DAX_LOCKED));
                entry = new_entry;
        } else {
                xas_load(xas);  /* Walk the xa_state */
        }

        if (dirty)
                xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

        if (write && shared)
                xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);

        xas_unlock_irq(xas);
        return entry;
}

static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
                struct address_space *mapping, void *entry)
{
        unsigned long pfn, index, count, end;
        long ret = 0;
        struct vm_area_struct *vma;

        /*
         * A page got tagged dirty in DAX mapping? Something is seriously
         * wrong.
         */
        if (WARN_ON(!xa_is_value(entry)))
                return -EIO;

        if (unlikely(dax_is_locked(entry))) {
                void *old_entry = entry;

                entry = get_unlocked_entry(xas, 0);

                /* Entry got punched out / reallocated? */
                if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
                        goto put_unlocked;
                /*
                 * Entry got reallocated elsewhere? No need to writeback.
                 * We have to compare pfns as we must not bail out due to
                 * difference in lockbit or entry type.
                 */
                if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
                        goto put_unlocked;
                if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
                                        dax_is_zero_entry(entry))) {
                        ret = -EIO;
                        goto put_unlocked;
                }

                /* Another fsync thread may have already done this entry */
                if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
                        goto put_unlocked;
        }

        /* Lock the entry to serialize with page faults */
        dax_lock_entry(xas, entry);

        /*
         * We can clear the tag now but we have to be careful so that concurrent
         * dax_writeback_one() calls for the same index cannot finish before we
         * actually flush the caches. This is achieved as the calls will look
         * at the entry only under the i_pages lock and once they do that
         * they will see the entry locked and wait for it to unlock.
         */
        xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
        xas_unlock_irq(xas);

        /*
         * If dax_writeback_mapping_range() was given a wbc->range_start
         * in the middle of a PMD, the 'index' we use needs to be
         * aligned to the start of the PMD.
         * This allows us to flush for PMD_SIZE and not have to worry about
         * partial PMD writebacks.
         */
        pfn = dax_to_pfn(entry);
        count = 1UL << dax_entry_order(entry);
        index = xas->xa_index & ~(count - 1);
        end = index + count - 1;

        /* Walk all mappings of a given index of a file and writeprotect them */
        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
                pfn_mkclean_range(pfn, count, index, vma);
                cond_resched();
        }
        i_mmap_unlock_read(mapping);

        dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
        /*
         * After we have flushed the cache, we can clear the dirty tag. There
         * cannot be new dirty data in the pfn after the flush has completed as
         * the pfn mappings are writeprotected and fault waits for mapping
         * entry lock.
         */
        xas_reset(xas);
        xas_lock_irq(xas);
        xas_store(xas, entry);
        xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
        dax_wake_entry(xas, entry, WAKE_NEXT);

        trace_dax_writeback_one(mapping->host, index, count);
        return ret;

 put_unlocked:
        put_unlocked_entry(xas, entry, WAKE_NEXT);
        return ret;
}
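
/*
 * Ordering summary: lock the entry, clear TOWRITE, drop the i_pages lock,
 * write-protect all userspace mappings, flush CPU caches to persistence,
 * then retake the lock, clear DIRTY and unlock the entry.  A fault racing
 * with this sequence either sees the locked entry and waits, or faults
 * after the write-protect and re-dirties the entry.
 */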

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
                struct dax_device *dax_dev, struct writeback_control *wbc)
{
        XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
        struct inode *inode = mapping->host;
        pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
        void *entry;
        int ret = 0;
        unsigned int scanned = 0;

        if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
                return -EIO;

        if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
                return 0;

        trace_dax_writeback_range(inode, xas.xa_index, end_index);

        tag_pages_for_writeback(mapping, xas.xa_index, end_index);

        xas_lock_irq(&xas);
        xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
                ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
                if (ret < 0) {
                        mapping_set_error(mapping, ret);
                        break;
                }
                if (++scanned % XA_CHECK_SCHED)
                        continue;

                xas_pause(&xas);
                xas_unlock_irq(&xas);
                cond_resched();
                xas_lock_irq(&xas);
        }
        xas_unlock_irq(&xas);
        trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
        return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
                size_t size, void **kaddr, pfn_t *pfnp)
{
        pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
        int id, rc = 0;
        long length;

        id = dax_read_lock();
        length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
                                   DAX_ACCESS, kaddr, pfnp);
        if (length < 0) {
                rc = length;
                goto out;
        }
        if (!pfnp)
                goto out_check_addr;
        rc = -EINVAL;
        if (PFN_PHYS(length) < size)
                goto out;
        if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
                goto out;
        /* For larger pages we need devmap */
        if (length > 1 && !pfn_t_devmap(*pfnp))
                goto out;
        rc = 0;

out_check_addr:
        if (!kaddr)
                goto out;
        if (!*kaddr)
                rc = -EFAULT;
out:
        dax_read_unlock(id);
        return rc;
}

/**
 * dax_iomap_copy_around - Prepare for an unaligned write to a shared/cow page
 * by copying the data before and after the range to be written.
 * @pos:        address to copy from.
 * @length:     size of copy operation.
 * @align_size: alignment granularity (either PMD_SIZE or PAGE_SIZE)
 * @srcmap:     iomap srcmap
 * @daddr:      destination address to copy to.
 *
 * This can be called from two places.  Either during a DAX write fault
 * (page aligned), to copy @length bytes of data to @daddr; or during a
 * normal DAX write operation, where dax_iomap_iter() might call it to copy
 * the unaligned head or tail of the range.  In the latter case the copy of
 * the aligned ranges is taken care of by dax_iomap_iter() itself.
 * If the srcmap contains invalid data, such as HOLE and UNWRITTEN, zero the
 * area to make sure no old data remains.
 */
static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size,
                const struct iomap *srcmap, void *daddr)
{
        loff_t head_off = pos & (align_size - 1);
        size_t size = ALIGN(head_off + length, align_size);
        loff_t end = pos + length;
        loff_t pg_end = round_up(end, align_size);
        /* copy_all is usually the page fault case */
        bool copy_all = head_off == 0 && end == pg_end;
        /* zero the edges if srcmap is a HOLE or IOMAP_UNWRITTEN */
        bool zero_edge = srcmap->flags & IOMAP_F_SHARED ||
                         srcmap->type == IOMAP_UNWRITTEN;
        void *saddr = NULL;
        int ret = 0;

        if (!zero_edge) {
                ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
                if (ret)
                        return dax_mem2blk_err(ret);
        }

        if (copy_all) {
                if (zero_edge)
                        memset(daddr, 0, size);
                else
                        ret = copy_mc_to_kernel(daddr, saddr, length);
                goto out;
        }

        /* Copy the head part of the range */
        if (head_off) {
                if (zero_edge)
                        memset(daddr, 0, head_off);
                else {
                        ret = copy_mc_to_kernel(daddr, saddr, head_off);
                        if (ret)
                                return -EIO;
                }
        }

        /* Copy the tail part of the range */
        if (end < pg_end) {
                loff_t tail_off = head_off + length;
                loff_t tail_len = pg_end - end;

                if (zero_edge)
                        memset(daddr + tail_off, 0, tail_len);
                else {
                        ret = copy_mc_to_kernel(daddr + tail_off,
                                                saddr + tail_off, tail_len);
                        if (ret)
                                return -EIO;
                }
        }
out:
        if (zero_edge)
                dax_flush(srcmap->dax_dev, daddr, size);
        return ret ? -EIO : 0;
}
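
/*
 * Worked example (illustrative numbers): a 50-byte write at pos = N*4096 +
 * 100 with align_size = 4096 gives head_off = 100, size = 4096 and pg_end
 * one page past pos.  The head copy preserves bytes 0..99 of the page, the
 * tail copy preserves bytes 150..4095 (tail_off = 150, tail_len = 3946),
 * and the caller then writes the 50 new bytes in between.
 */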

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
                const struct iomap_iter *iter, void **entry)
{
        struct inode *inode = iter->inode;
        unsigned long vaddr = vmf->address;
        pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
        vm_fault_t ret;

        *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);

        ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
        trace_dax_load_hole(inode, vmf, ret);
        return ret;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
                const struct iomap_iter *iter, void **entry)
{
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        unsigned long pmd_addr = vmf->address & PMD_MASK;
        struct vm_area_struct *vma = vmf->vma;
        struct inode *inode = mapping->host;
        pgtable_t pgtable = NULL;
        struct folio *zero_folio;
        spinlock_t *ptl;
        pmd_t pmd_entry;
        pfn_t pfn;

        zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm);

        if (unlikely(!zero_folio))
                goto fallback;

        pfn = page_to_pfn_t(&zero_folio->page);
        *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
                                  DAX_PMD | DAX_ZERO_PAGE);

        if (arch_needs_pgtable_deposit()) {
                pgtable = pte_alloc_one(vma->vm_mm);
                if (!pgtable)
                        return VM_FAULT_OOM;
        }

        ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
        if (!pmd_none(*(vmf->pmd))) {
                spin_unlock(ptl);
                goto fallback;
        }

        if (pgtable) {
                pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
                mm_inc_nr_ptes(vma->vm_mm);
        }
        pmd_entry = mk_pmd(&zero_folio->page, vmf->vma->vm_page_prot);
        pmd_entry = pmd_mkhuge(pmd_entry);
        set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
        spin_unlock(ptl);
        trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry);
        return VM_FAULT_NOPAGE;

fallback:
        if (pgtable)
                pte_free(vma->vm_mm, pgtable);
        trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry);
        return VM_FAULT_FALLBACK;
}
#else
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
                const struct iomap_iter *iter, void **entry)
{
        return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

static s64 dax_unshare_iter(struct iomap_iter *iter)
{
        struct iomap *iomap = &iter->iomap;
        const struct iomap *srcmap = iomap_iter_srcmap(iter);
        loff_t pos = iter->pos;
        loff_t length = iomap_length(iter);
        int id = 0;
        s64 ret = 0;
        void *daddr = NULL, *saddr = NULL;

        /* don't bother with blocks that are not shared to start with */
        if (!(iomap->flags & IOMAP_F_SHARED))
                return length;

        id = dax_read_lock();
        ret = dax_iomap_direct_access(iomap, pos, length, &daddr, NULL);
        if (ret < 0)
                goto out_unlock;

        /* zero the distance if srcmap is HOLE or UNWRITTEN */
        if (srcmap->flags & IOMAP_F_SHARED || srcmap->type == IOMAP_UNWRITTEN) {
                memset(daddr, 0, length);
                dax_flush(iomap->dax_dev, daddr, length);
                ret = length;
                goto out_unlock;
        }

        ret = dax_iomap_direct_access(srcmap, pos, length, &saddr, NULL);
        if (ret < 0)
                goto out_unlock;

        if (copy_mc_to_kernel(daddr, saddr, length) == 0)
                ret = length;
        else
                ret = -EIO;

out_unlock:
        dax_read_unlock(id);
        return dax_mem2blk_err(ret);
}

int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
                const struct iomap_ops *ops)
{
        struct iomap_iter iter = {
                .inode          = inode,
                .pos            = pos,
                .flags          = IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX,
        };
        loff_t size = i_size_read(inode);
        int ret;

        if (pos < 0 || pos >= size)
                return 0;

        iter.len = min(len, size - pos);
        while ((ret = iomap_iter(&iter, ops)) > 0)
                iter.processed = dax_unshare_iter(&iter);
        return ret;
}
EXPORT_SYMBOL_GPL(dax_file_unshare);
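
/*
 * The iomap_iter() loop used here (and in dax_zero_range() below) follows
 * the standard iomap convention: each pass the filesystem maps one extent,
 * the body processes it and stores the number of bytes handled (or a
 * negative errno) in iter.processed, and iomap_iter() advances until the
 * range is exhausted or an error stops the walk.
 */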

static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
{
        const struct iomap *iomap = &iter->iomap;
        const struct iomap *srcmap = iomap_iter_srcmap(iter);
        unsigned offset = offset_in_page(pos);
        pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
        void *kaddr;
        long ret;

        ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr,
                                NULL);
        if (ret < 0)
                return dax_mem2blk_err(ret);

        memset(kaddr + offset, 0, size);
        if (iomap->flags & IOMAP_F_SHARED)
                ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap,
                                            kaddr);
        else
                dax_flush(iomap->dax_dev, kaddr + offset, size);
        return ret;
}

static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
        const struct iomap *iomap = &iter->iomap;
        const struct iomap *srcmap = iomap_iter_srcmap(iter);
        loff_t pos = iter->pos;
        u64 length = iomap_length(iter);
        s64 written = 0;

        /* already zeroed?  we're done. */
        if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
                return length;

        /*
         * invalidate the pages whose sharing state is to be changed
         * because of CoW.
         */
        if (iomap->flags & IOMAP_F_SHARED)
                invalidate_inode_pages2_range(iter->inode->i_mapping,
                                              pos >> PAGE_SHIFT,
                                              (pos + length - 1) >> PAGE_SHIFT);

        do {
                unsigned offset = offset_in_page(pos);
                unsigned size = min_t(u64, PAGE_SIZE - offset, length);
                pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
                long rc;
                int id;

                id = dax_read_lock();
                if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
                        rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
                else
                        rc = dax_memzero(iter, pos, size);
                dax_read_unlock(id);

                if (rc < 0)
                        return rc;
                pos += size;
                length -= size;
                written += size;
        } while (length > 0);

        if (did_zero)
                *did_zero = true;
        return written;
}

int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
                const struct iomap_ops *ops)
{
        struct iomap_iter iter = {
                .inode          = inode,
                .pos            = pos,
                .len            = len,
                .flags          = IOMAP_DAX | IOMAP_ZERO,
        };
        int ret;

        while ((ret = iomap_iter(&iter, ops)) > 0)
                iter.processed = dax_zero_iter(&iter, did_zero);
        return ret;
}
EXPORT_SYMBOL_GPL(dax_zero_range);

int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
                const struct iomap_ops *ops)
{
        unsigned int blocksize = i_blocksize(inode);
        unsigned int off = pos & (blocksize - 1);

        /* Block boundary? Nothing to do */
        if (!off)
                return 0;
        return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
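
/*
 * Example (illustrative numbers): truncating to pos = 5000 with a 4096-byte
 * block size gives off = 5000 & 4095 = 904, so the tail 4096 - 904 = 3192
 * bytes of the final block, i.e. the range [5000, 8192), are zeroed.
 */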
1422 
1423 static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
1424                 struct iov_iter *iter)
1425 {
1426         const struct iomap *iomap = &iomi->iomap;
1427         const struct iomap *srcmap = iomap_iter_srcmap(iomi);
1428         loff_t length = iomap_length(iomi);
1429         loff_t pos = iomi->pos;
1430         struct dax_device *dax_dev = iomap->dax_dev;
1431         loff_t end = pos + length, done = 0;
1432         bool write = iov_iter_rw(iter) == WRITE;
1433         bool cow = write && iomap->flags & IOMAP_F_SHARED;
1434         ssize_t ret = 0;
1435         size_t xfer;
1436         int id;
1437 
1438         if (!write) {
1439                 end = min(end, i_size_read(iomi->inode));
1440                 if (pos >= end)
1441                         return 0;
1442 
1443                 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1444                         return iov_iter_zero(min(length, end - pos), iter);
1445         }
1446 
1447         /*
1448          * In DAX mode, enforce either pure overwrites of written extents, or
1449          * writes to unwritten extents as part of a copy-on-write operation.
1450          */
1451         if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
1452                         !(iomap->flags & IOMAP_F_SHARED)))
1453                 return -EIO;
1454 
1455         /*
1456          * A write can allocate blocks for an area which has a hole page
1457          * mapped into the page tables. We have to tear down these mappings
1458          * so that data written by write(2) is visible in mmap.
1459          */
1460         if (iomap->flags & IOMAP_F_NEW || cow) {
1461                 /*
1462                  * The filesystem allows CoW on non-shared extents. The source
1463                  * extents may have been mmapped and marked dirty before. To be
1464                  * able to invalidate their dax entries, we need to clear the
1465                  * dirty mark in advance.
1466                  */
1467                 if (cow)
1468                         __dax_clear_dirty_range(iomi->inode->i_mapping,
1469                                                 pos >> PAGE_SHIFT,
1470                                                 (end - 1) >> PAGE_SHIFT);
1471                 invalidate_inode_pages2_range(iomi->inode->i_mapping,
1472                                               pos >> PAGE_SHIFT,
1473                                               (end - 1) >> PAGE_SHIFT);
1474         }
1475 
1476         id = dax_read_lock();
1477         while (pos < end) {
1478                 unsigned offset = pos & (PAGE_SIZE - 1);
1479                 const size_t size = ALIGN(length + offset, PAGE_SIZE);
1480                 pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1481                 ssize_t map_len;
1482                 bool recovery = false;
1483                 void *kaddr;
1484 
1485                 if (fatal_signal_pending(current)) {
1486                         ret = -EINTR;
1487                         break;
1488                 }
1489 
1490                 map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1491                                 DAX_ACCESS, &kaddr, NULL);
1492                 if (map_len == -EHWPOISON && iov_iter_rw(iter) == WRITE) {
1493                         map_len = dax_direct_access(dax_dev, pgoff,
1494                                         PHYS_PFN(size), DAX_RECOVERY_WRITE,
1495                                         &kaddr, NULL);
1496                         if (map_len > 0)
1497                                 recovery = true;
1498                 }
1499                 if (map_len < 0) {
1500                         ret = dax_mem2blk_err(map_len);
1501                         break;
1502                 }
1503 
1504                 if (cow) {
1505                         ret = dax_iomap_copy_around(pos, length, PAGE_SIZE,
1506                                                     srcmap, kaddr);
1507                         if (ret)
1508                                 break;
1509                 }
1510 
1511                 map_len = PFN_PHYS(map_len);
1512                 kaddr += offset;
1513                 map_len -= offset;
1514                 if (map_len > end - pos)
1515                         map_len = end - pos;
1516 
1517                 if (recovery)
1518                         xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
1519                                         map_len, iter);
1520                 else if (write)
1521                         xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1522                                         map_len, iter);
1523                 else
1524                         xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1525                                         map_len, iter);
1526 
1527                 pos += xfer;
1528                 length -= xfer;
1529                 done += xfer;
1530 
1531                 if (xfer == 0)
1532                         ret = -EFAULT;
1533                 if (xfer < map_len)
1534                         break;
1535         }
1536         dax_read_unlock(id);
1537 
1538         return done ? done : ret;
1539 }
1540 
1541 /**
1542  * dax_iomap_rw - Perform I/O to a DAX file
1543  * @iocb:       The control block for this I/O
1544  * @iter:       The addresses to do I/O from or to
1545  * @ops:        iomap ops passed from the file system
1546  *
1547  * This function performs read and write operations to directly mapped
1548  * persistent memory.  The caller needs to take care of read/write exclusion
1549  * and evicting any page cache pages in the region under I/O.
1550  */
1551 ssize_t
1552 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1553                 const struct iomap_ops *ops)
1554 {
1555         struct iomap_iter iomi = {
1556                 .inode          = iocb->ki_filp->f_mapping->host,
1557                 .pos            = iocb->ki_pos,
1558                 .len            = iov_iter_count(iter),
1559                 .flags          = IOMAP_DAX,
1560         };
1561         loff_t done = 0;
1562         int ret;
1563 
1564         if (!iomi.len)
1565                 return 0;
1566 
1567         if (iov_iter_rw(iter) == WRITE) {
1568                 lockdep_assert_held_write(&iomi.inode->i_rwsem);
1569                 iomi.flags |= IOMAP_WRITE;
1570         } else {
1571                 lockdep_assert_held(&iomi.inode->i_rwsem);
1572         }
1573 
1574         if (iocb->ki_flags & IOCB_NOWAIT)
1575                 iomi.flags |= IOMAP_NOWAIT;
1576 
1577         while ((ret = iomap_iter(&iomi, ops)) > 0)
1578                 iomi.processed = dax_iomap_iter(&iomi, iter);
1579 
1580         done = iomi.pos - iocb->ki_pos;
1581         iocb->ki_pos = iomi.pos;
1582         return done ? done : ret;
1583 }
1584 EXPORT_SYMBOL_GPL(dax_iomap_rw);
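/*
 * Usage sketch: a DAX ->read_iter built on dax_iomap_rw(), using the
 * hypothetical examplefs_iomap_ops above.  Taking i_rwsem shared satisfies
 * the lockdep_assert_held() in dax_iomap_rw(); a write path would take
 * inode_lock() instead, to match lockdep_assert_held_write().
 */
static ssize_t examplefs_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock_shared(inode))
                        return -EAGAIN;
        } else {
                inode_lock_shared(inode);
        }
        ret = dax_iomap_rw(iocb, to, &examplefs_iomap_ops);
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}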
1585 
1586 static vm_fault_t dax_fault_return(int error)
1587 {
1588         if (error == 0)
1589                 return VM_FAULT_NOPAGE;
1590         return vmf_error(error);
1591 }
1592 
1593 /*
1594  * When handling a synchronous page fault and the inode needs fsync, we can
1595  * insert the PTE/PMD into the page tables only after that fsync has
1596  * happened. Skip the insertion for now and return the pfn so that the
1597  * caller can insert it after the fsync is done.
1598  */
1599 static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
1600 {
1601         if (WARN_ON_ONCE(!pfnp))
1602                 return VM_FAULT_SIGBUS;
1603         *pfnp = pfn;
1604         return VM_FAULT_NEEDDSYNC;
1605 }
1606 
1607 static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
1608                 const struct iomap_iter *iter)
1609 {
1610         vm_fault_t ret;
1611         int error = 0;
1612 
1613         switch (iter->iomap.type) {
1614         case IOMAP_HOLE:
1615         case IOMAP_UNWRITTEN:
1616                 clear_user_highpage(vmf->cow_page, vmf->address);
1617                 break;
1618         case IOMAP_MAPPED:
1619                 error = copy_cow_page_dax(vmf, iter);
1620                 break;
1621         default:
1622                 WARN_ON_ONCE(1);
1623                 error = -EIO;
1624                 break;
1625         }
1626 
1627         if (error)
1628                 return dax_fault_return(error);
1629 
1630         __SetPageUptodate(vmf->cow_page);
1631         ret = finish_fault(vmf);
1632         if (!ret)
1633                 return VM_FAULT_DONE_COW;
1634         return ret;
1635 }
1636 
1637 /**
1638  * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
1639  * @vmf:        vm fault instance
1640  * @iter:       iomap iter
1641  * @pfnp:       pfn to be returned
1642  * @xas:        the dax mapping tree of a file
1643  * @entry:      an unlocked dax entry to be inserted
1644  * @pmd:        true if this is a PMD fault, false for a PTE fault
1645  */
1646 static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
1647                 const struct iomap_iter *iter, pfn_t *pfnp,
1648                 struct xa_state *xas, void **entry, bool pmd)
1649 {
1650         const struct iomap *iomap = &iter->iomap;
1651         const struct iomap *srcmap = iomap_iter_srcmap(iter);
1652         size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
1653         loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
1654         bool write = iter->flags & IOMAP_WRITE;
1655         unsigned long entry_flags = pmd ? DAX_PMD : 0;
1656         int err = 0;
1657         pfn_t pfn;
1658         void *kaddr;
1659 
1660         if (!pmd && vmf->cow_page)
1661                 return dax_fault_cow_page(vmf, iter);
1662 
1663         /* If we are reading from UNWRITTEN or HOLE, return a hole. */
1664         if (!write &&
1665             (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
1666                 if (!pmd)
1667                         return dax_load_hole(xas, vmf, iter, entry);
1668                 return dax_pmd_load_hole(xas, vmf, iter, entry);
1669         }
1670 
1671         if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) {
1672                 WARN_ON_ONCE(1);
1673                 return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
1674         }
1675 
1676         err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
1677         if (err)
1678                 return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
1679 
1680         *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);
1681 
1682         if (write && iomap->flags & IOMAP_F_SHARED) {
1683                 err = dax_iomap_copy_around(pos, size, size, srcmap, kaddr);
1684                 if (err)
1685                         return dax_fault_return(err);
1686         }
1687 
1688         if (dax_fault_is_synchronous(iter, vmf->vma))
1689                 return dax_fault_synchronous_pfnp(pfnp, pfn);
1690 
1691         /* insert PMD pfn */
1692         if (pmd)
1693                 return vmf_insert_pfn_pmd(vmf, pfn, write);
1694 
1695         /* insert PTE pfn */
1696         if (write)
1697                 return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1698         return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
1699 }
1700 
1701 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1702                                int *iomap_errp, const struct iomap_ops *ops)
1703 {
1704         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1705         XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
1706         struct iomap_iter iter = {
1707                 .inode          = mapping->host,
1708                 .pos            = (loff_t)vmf->pgoff << PAGE_SHIFT,
1709                 .len            = PAGE_SIZE,
1710                 .flags          = IOMAP_DAX | IOMAP_FAULT,
1711         };
1712         vm_fault_t ret = 0;
1713         void *entry;
1714         int error;
1715 
1716         trace_dax_pte_fault(iter.inode, vmf, ret);
1717         /*
1718          * Check that the offset isn't beyond the end of file now. The caller
1719          * is supposed to hold locks serializing us with truncate / punch hole,
1720          * so this is a reliable test.
1721          */
1722         if (iter.pos >= i_size_read(iter.inode)) {
1723                 ret = VM_FAULT_SIGBUS;
1724                 goto out;
1725         }
1726 
1727         if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1728                 iter.flags |= IOMAP_WRITE;
1729 
1730         entry = grab_mapping_entry(&xas, mapping, 0);
1731         if (xa_is_internal(entry)) {
1732                 ret = xa_to_internal(entry);
1733                 goto out;
1734         }
1735 
1736         /*
1737          * It is possible, particularly with mixed reads & writes to private
1738          * mappings, that we have raced with a PMD fault that overlaps with
1739          * the PTE we need to set up.  If so just return and the fault will be
1740          * retried.
1741          */
1742         if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1743                 ret = VM_FAULT_NOPAGE;
1744                 goto unlock_entry;
1745         }
1746 
1747         while ((error = iomap_iter(&iter, ops)) > 0) {
1748                 if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
1749                         iter.processed = -EIO;  /* fs corruption? */
1750                         continue;
1751                 }
1752 
1753                 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
1754                 if (ret != VM_FAULT_SIGBUS &&
1755                     (iter.iomap.flags & IOMAP_F_NEW)) {
1756                         count_vm_event(PGMAJFAULT);
1757                         count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
1758                         ret |= VM_FAULT_MAJOR;
1759                 }
1760 
1761                 if (!(ret & VM_FAULT_ERROR))
1762                         iter.processed = PAGE_SIZE;
1763         }
1764 
1765         if (iomap_errp)
1766                 *iomap_errp = error;
1767         if (!ret && error)
1768                 ret = dax_fault_return(error);
1769 
1770 unlock_entry:
1771         dax_unlock_entry(&xas, entry);
1772 out:
1773         trace_dax_pte_fault_done(iter.inode, vmf, ret);
1774         return ret;
1775 }
1776 
1777 #ifdef CONFIG_FS_DAX_PMD
1778 static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
1779                 pgoff_t max_pgoff)
1780 {
1781         unsigned long pmd_addr = vmf->address & PMD_MASK;
1782         bool write = vmf->flags & FAULT_FLAG_WRITE;
1783 
1784         /*
1785          * Make sure that the faulting address's PMD offset (colour) matches
1786          * the PMD offset from the start of the file.  This is necessary so
1787          * that a PMD range in the page table overlaps exactly with a PMD
1788          * range in the page cache.
1789          */
1790         if ((vmf->pgoff & PG_PMD_COLOUR) !=
1791             ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1792                 return true;
1793 
1794         /* Fall back to PTEs if we're going to COW */
1795         if (write && !(vmf->vma->vm_flags & VM_SHARED))
1796                 return true;
1797 
1798         /* If the PMD would extend outside the VMA */
1799         if (pmd_addr < vmf->vma->vm_start)
1800                 return true;
1801         if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
1802                 return true;
1803 
1804         /* If the PMD would extend beyond the file size */
1805         if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
1806                 return true;
1807 
1808         return false;
1809 }
1810 
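/*
 * Worked example of the colour check above (assuming x86-64: 4KiB pages,
 * 2MiB PMDs, so PG_PMD_COLOUR = (PMD_SIZE >> PAGE_SHIFT) - 1 = 511):
 * a fault at vmf->pgoff = 1000 has file colour 1000 & 511 = 488, so the
 * faulting address must also sit 488 pages past a 2MiB boundary, i.e.
 * (vmf->address >> PAGE_SHIFT) & 511 == 488.  Otherwise no single PMD can
 * cover both the page table range and the page cache range, and we must
 * fall back to PTEs.
 */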
1811 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1812                                const struct iomap_ops *ops)
1813 {
1814         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1815         XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
1816         struct iomap_iter iter = {
1817                 .inode          = mapping->host,
1818                 .len            = PMD_SIZE,
1819                 .flags          = IOMAP_DAX | IOMAP_FAULT,
1820         };
1821         vm_fault_t ret = VM_FAULT_FALLBACK;
1822         pgoff_t max_pgoff;
1823         void *entry;
1824 
1825         if (vmf->flags & FAULT_FLAG_WRITE)
1826                 iter.flags |= IOMAP_WRITE;
1827 
1828         /*
1829          * Check that the offset isn't beyond the end of file now. The caller
1830          * is supposed to hold locks serializing us with truncate / punch
1831          * hole, so this is a reliable test.
1832          */
1833         max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);
1834 
1835         trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);
1836 
1837         if (xas.xa_index >= max_pgoff) {
1838                 ret = VM_FAULT_SIGBUS;
1839                 goto out;
1840         }
1841 
1842         if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
1843                 goto fallback;
1844 
1845         /*
1846          * grab_mapping_entry() will make sure we get an empty PMD entry,
1847          * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
1848          * entry is already in the array, for instance), it will return
1849          * VM_FAULT_FALLBACK.
1850          */
1851         entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
1852         if (xa_is_internal(entry)) {
1853                 ret = xa_to_internal(entry);
1854                 goto fallback;
1855         }
1856 
1857         /*
1858          * It is possible, particularly with mixed reads & writes to private
1859          * mappings, that we have raced with a PTE fault that overlaps with
1860          * the PMD we need to set up.  If so just return and the fault will be
1861          * retried.
1862          */
1863         if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1864                         !pmd_devmap(*vmf->pmd)) {
1865                 ret = 0;
1866                 goto unlock_entry;
1867         }
1868 
1869         iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
1870         while (iomap_iter(&iter, ops) > 0) {
1871                 if (iomap_length(&iter) < PMD_SIZE)
1872                         continue; /* ->processed stays 0, so iomap_iter() ends the loop */
1873 
1874                 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
1875                 if (ret != VM_FAULT_FALLBACK)
1876                         iter.processed = PMD_SIZE;
1877         }
1878 
1879 unlock_entry:
1880         dax_unlock_entry(&xas, entry);
1881 fallback:
1882         if (ret == VM_FAULT_FALLBACK) {
1883                 split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
1884                 count_vm_event(THP_FAULT_FALLBACK);
1885         }
1886 out:
1887         trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
1888         return ret;
1889 }
1890 #else
1891 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1892                                const struct iomap_ops *ops)
1893 {
1894         return VM_FAULT_FALLBACK;
1895 }
1896 #endif /* CONFIG_FS_DAX_PMD */
1897 
1898 /**
1899  * dax_iomap_fault - handle a page fault on a DAX file
1900  * @vmf: The description of the fault
1901  * @order: Order of the page to fault in
1902  * @pfnp: PFN to insert for synchronous faults if fsync is required
1903  * @iomap_errp: Storage for detailed error code in case of error
1904  * @ops: Iomap ops passed from the file system
1905  *
1906  * When a page fault occurs, filesystems may call this helper in
1907  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1908  * has done all the necessary locking for the page fault to proceed
1909  * successfully.
1910  */
1911 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
1912                     pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1913 {
1914         if (order == 0)
1915                 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1916         else if (order == PMD_ORDER)
1917                 return dax_iomap_pmd_fault(vmf, pfnp, ops);
1918         else
1919                 return VM_FAULT_FALLBACK;
1920 }
1921 EXPORT_SYMBOL_GPL(dax_iomap_fault);
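/*
 * Usage sketch: fault handlers wired up through dax_iomap_fault(), using
 * the hypothetical examplefs_iomap_ops above.  Real callers (e.g. ext4,
 * xfs) additionally wrap write faults in sb_start_pagefault() /
 * sb_end_pagefault() and take a shared mapping lock.
 */
static vm_fault_t examplefs_dax_fault(struct vm_fault *vmf)
{
        return dax_iomap_fault(vmf, 0, NULL, NULL, &examplefs_iomap_ops);
}

static vm_fault_t examplefs_dax_huge_fault(struct vm_fault *vmf,
                unsigned int order)
{
        return dax_iomap_fault(vmf, order, NULL, NULL, &examplefs_iomap_ops);
}

static const struct vm_operations_struct examplefs_dax_vm_ops = {
        .fault          = examplefs_dax_fault,
        .huge_fault     = examplefs_dax_huge_fault,
        .page_mkwrite   = examplefs_dax_fault,
        .pfn_mkwrite    = examplefs_dax_fault,
};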
1922 
1923 /*
1924  * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1925  * @vmf: The description of the fault
1926  * @pfn: PFN to insert
1927  * @order: Order of entry to insert.
1928  *
1929  * This function inserts a writeable PTE or PMD entry into the page tables
1930  * for an mmapped DAX file.  It also marks the page cache entry as dirty.
1931  */
1932 static vm_fault_t
1933 dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1934 {
1935         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1936         XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1937         void *entry;
1938         vm_fault_t ret;
1939 
1940         xas_lock_irq(&xas);
1941         entry = get_unlocked_entry(&xas, order);
1942         /* Did we race with someone splitting the entry, or similar? */
1943         if (!entry || dax_is_conflict(entry) ||
1944             (order == 0 && !dax_is_pte_entry(entry))) {
1945                 put_unlocked_entry(&xas, entry, WAKE_NEXT);
1946                 xas_unlock_irq(&xas);
1947                 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1948                                                       VM_FAULT_NOPAGE);
1949                 return VM_FAULT_NOPAGE;
1950         }
1951         xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
1952         dax_lock_entry(&xas, entry);
1953         xas_unlock_irq(&xas);
1954         if (order == 0)
1955                 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1956 #ifdef CONFIG_FS_DAX_PMD
1957         else if (order == PMD_ORDER)
1958                 ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
1959 #endif
1960         else
1961                 ret = VM_FAULT_FALLBACK;
1962         dax_unlock_entry(&xas, entry);
1963         trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1964         return ret;
1965 }
1966 
1967 /**
1968  * dax_finish_sync_fault - finish synchronous page fault
1969  * @vmf: The description of the fault
1970  * @order: Order of entry to be inserted
1971  * @pfn: PFN to insert
1972  *
1973  * This function ensures that the file range touched by the page fault is
1974  * stored persistently on the media and handles insertion of the appropriate
1975  * page table entry.
1976  */
1977 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order,
1978                 pfn_t pfn)
1979 {
1980         int err;
1981         loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1982         size_t len = PAGE_SIZE << order;
1983 
1984         err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1985         if (err)
1986                 return VM_FAULT_SIGBUS;
1987         return dax_insert_pfn_mkwrite(vmf, pfn, order);
1988 }
1989 EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
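/*
 * Usage sketch of the synchronous fault protocol, with the hypothetical
 * examplefs_iomap_ops above: for a MAP_SYNC mapping whose inode needs
 * fsync, dax_iomap_fault() hands back VM_FAULT_NEEDDSYNC and the pfn
 * instead of mapping it; dax_finish_sync_fault() then persists the range
 * via vfs_fsync_range() and performs the deferred insertion.
 */
static vm_fault_t examplefs_dax_sync_fault(struct vm_fault *vmf,
                unsigned int order)
{
        pfn_t pfn;
        vm_fault_t ret;

        ret = dax_iomap_fault(vmf, order, &pfn, NULL, &examplefs_iomap_ops);
        if (ret & VM_FAULT_NEEDDSYNC)
                ret = dax_finish_sync_fault(vmf, order, pfn);
        return ret;
}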
1990 
1991 static loff_t dax_range_compare_iter(struct iomap_iter *it_src,
1992                 struct iomap_iter *it_dest, u64 len, bool *same)
1993 {
1994         const struct iomap *smap = &it_src->iomap;
1995         const struct iomap *dmap = &it_dest->iomap;
1996         loff_t pos1 = it_src->pos, pos2 = it_dest->pos;
1997         void *saddr, *daddr;
1998         int id, ret;
1999 
2000         len = min(len, min(smap->length, dmap->length));
2001 
2002         if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) {
2003                 *same = true;
2004                 return len;
2005         }
2006 
2007         if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) {
2008                 *same = false;
2009                 return 0;
2010         }
2011 
2012         id = dax_read_lock();
2013         ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE),
2014                                       &saddr, NULL);
2015         if (ret < 0)
2016                 goto out_unlock;
2017 
2018         ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE),
2019                                       &daddr, NULL);
2020         if (ret < 0)
2021                 goto out_unlock;
2022 
2023         *same = !memcmp(saddr, daddr, len);
2024         if (!*same)
2025                 len = 0;
2026         dax_read_unlock(id);
2027         return len;
2028 
2029 out_unlock:
2030         dax_read_unlock(id);
2031         return -EIO;
2032 }
2033 
2034 int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
2035                 struct inode *dst, loff_t dstoff, loff_t len, bool *same,
2036                 const struct iomap_ops *ops)
2037 {
2038         struct iomap_iter src_iter = {
2039                 .inode          = src,
2040                 .pos            = srcoff,
2041                 .len            = len,
2042                 .flags          = IOMAP_DAX,
2043         };
2044         struct iomap_iter dst_iter = {
2045                 .inode          = dst,
2046                 .pos            = dstoff,
2047                 .len            = len,
2048                 .flags          = IOMAP_DAX,
2049         };
2050         int ret, compared = 0;
2051 
2052         while ((ret = iomap_iter(&src_iter, ops)) > 0 &&
2053                (ret = iomap_iter(&dst_iter, ops)) > 0) {
2054                 compared = dax_range_compare_iter(&src_iter, &dst_iter,
2055                                 min(src_iter.len, dst_iter.len), same);
2056                 if (compared < 0)
2057                         return compared; /* propagate the compare error */
2058                 src_iter.processed = dst_iter.processed = compared;
2059         }
2060         return ret;
2061 }
2062 
2063 int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
2064                               struct file *file_out, loff_t pos_out,
2065                               loff_t *len, unsigned int remap_flags,
2066                               const struct iomap_ops *ops)
2067 {
2068         return __generic_remap_file_range_prep(file_in, pos_in, file_out,
2069                                                pos_out, len, remap_flags, ops);
2070 }
2071 EXPORT_SYMBOL_GPL(dax_remap_file_range_prep);
2072 
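/*
 * Usage sketch: wiring dax_remap_file_range_prep() into a filesystem's
 * ->remap_file_range for reflink/dedupe on DAX files, using the
 * hypothetical examplefs_iomap_ops above.  For dedupe requests the prep
 * step reaches dax_dedupe_file_range_compare() to do the byte-wise
 * comparison; examplefs_do_remap() is a hypothetical helper that would
 * perform the actual extent remapping.
 */
extern loff_t examplefs_do_remap(struct file *file_in, loff_t pos_in,
                struct file *file_out, loff_t pos_out,
                loff_t len);                            /* hypothetical */

static loff_t examplefs_remap_file_range(struct file *file_in, loff_t pos_in,
                struct file *file_out, loff_t pos_out, loff_t len,
                unsigned int remap_flags)
{
        int ret;

        if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
                return -EINVAL;

        ret = dax_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
                                        &len, remap_flags,
                                        &examplefs_iomap_ops);
        if (ret < 0 || len == 0)
                return ret;

        return examplefs_do_remap(file_in, pos_in, file_out, pos_out, len);
}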
