TOMOYO Linux Cross Reference
Linux/virt/kvm/guest_memfd.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/kvm_host.h>
#include <linux/pagemap.h>
#include <linux/anon_inodes.h>

#include "kvm_mm.h"

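/*
 * Per-file state for a guest_memfd instance: the owning VM, an xarray
 * mapping file offsets (pgoffs) to the memslots bound to them, and the
 * link into the inode's i_private_list of all files sharing the inode.
 */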
struct kvm_gmem {
        struct kvm *kvm;
        struct xarray bindings;
        struct list_head entry;
};

/**
 * folio_file_pfn - like folio_file_page, but return a pfn.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Return: The pfn for this index.
 */
static inline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index)
{
        return folio_pfn(folio) + (index & (folio_nr_pages(folio) - 1));
}

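/*
 * Give the architecture a chance to prepare the target pfn for use as
 * guest memory, e.g. to update metadata such as SEV-SNP's RMP entries.
 * No-op if CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE is not selected.
 */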
static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
                                    pgoff_t index, struct folio *folio)
{
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
        kvm_pfn_t pfn = folio_file_pfn(folio, index);
        gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff;
        int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));
        if (rc) {
                pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
                                    index, gfn, pfn, rc);
                return rc;
        }
#endif

        return 0;
}

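/*
 * Track "prepared" via the folio's up-to-date flag: once the arch hook
 * has run, the contents are ready to be mapped into the guest.
 */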
static inline void kvm_gmem_mark_prepared(struct folio *folio)
{
        folio_mark_uptodate(folio);
}

/*
 * Process @folio, which contains @gfn, so that the guest can use it.
 * The folio must be locked and the gfn must be contained in @slot.
 * On successful return the folio contains only zeroes (so no host data
 * can leak into the guest) and its up-to-date flag is set.
 */
static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
                                  gfn_t gfn, struct folio *folio)
{
        unsigned long nr_pages, i;
        pgoff_t index;
        int r;

        nr_pages = folio_nr_pages(folio);
        for (i = 0; i < nr_pages; i++)
                clear_highpage(folio_page(folio, i));

        /*
         * Preparing huge folios should always be safe, since it should
         * be possible to split them later if needed.
         *
         * Right now the folio order is always going to be zero, but the
         * code is ready for huge folios.  The only assumption is that
         * the base pgoff of memslots is naturally aligned with the
         * requested page order, ensuring that huge folios can also use
         * huge page table entries for GPA->HPA mapping.
         *
         * The order will be passed when creating the guest_memfd, and
         * checked when creating memslots.
         */
        WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, 1 << folio_order(folio)));
        index = gfn - slot->base_gfn + slot->gmem.pgoff;
        index = ALIGN_DOWN(index, 1 << folio_order(folio));
        r = __kvm_gmem_prepare_folio(kvm, slot, index, folio);
        if (!r)
                kvm_gmem_mark_prepared(folio);

        return r;
}

/*
 * Returns a locked folio on success.  The caller is responsible for
 * setting the up-to-date flag before the memory is mapped into the guest.
 * There is no backing storage for the memory, so the folio will remain
 * up-to-date until it's removed.
 *
 * Ignore accessed, referenced, and dirty flags.  The memory is
 * unevictable and there is no storage to write back to.
 */
static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
{
        /* TODO: Support huge pages. */
        return filemap_grab_folio(inode->i_mapping, index);
}

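/*
 * Zap the SPTEs of every memslot bound to the range [start, end).  Takes
 * mmu_lock and opens an MMU invalidation window if any binding is found;
 * kvm_gmem_invalidate_end() must be called afterwards to close it.
 */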
static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
                                      pgoff_t end)
{
        bool flush = false, found_memslot = false;
        struct kvm_memory_slot *slot;
        struct kvm *kvm = gmem->kvm;
        unsigned long index;

        xa_for_each_range(&gmem->bindings, index, slot, start, end - 1) {
                pgoff_t pgoff = slot->gmem.pgoff;

                struct kvm_gfn_range gfn_range = {
                        .start = slot->base_gfn + max(pgoff, start) - pgoff,
                        .end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
                        .slot = slot,
                        .may_block = true,
                };

                if (!found_memslot) {
                        found_memslot = true;

                        KVM_MMU_LOCK(kvm);
                        kvm_mmu_invalidate_begin(kvm);
                }

                flush |= kvm_mmu_unmap_gfn_range(kvm, &gfn_range);
        }

        if (flush)
                kvm_flush_remote_tlbs(kvm);

        if (found_memslot)
                KVM_MMU_UNLOCK(kvm);
}

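/* Close the invalidation window opened by kvm_gmem_invalidate_begin(). */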
static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
                                    pgoff_t end)
{
        struct kvm *kvm = gmem->kvm;

        if (xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
                KVM_MMU_LOCK(kvm);
                kvm_mmu_invalidate_end(kvm);
                KVM_MMU_UNLOCK(kvm);
        }
}

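/*
 * Punch a hole in the file: zap all SPTEs covering the range, then drop
 * the backing folios.  The invalidate lock freezes the bindings so that
 * every invalidate_begin() is paired with an invalidate_end().
 */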
static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
        struct list_head *gmem_list = &inode->i_mapping->i_private_list;
        pgoff_t start = offset >> PAGE_SHIFT;
        pgoff_t end = (offset + len) >> PAGE_SHIFT;
        struct kvm_gmem *gmem;

        /*
         * Bindings must be stable across invalidation to ensure the start+end
         * are balanced.
         */
        filemap_invalidate_lock(inode->i_mapping);

        list_for_each_entry(gmem, gmem_list, entry)
                kvm_gmem_invalidate_begin(gmem, start, end);

        truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);

        list_for_each_entry(gmem, gmem_list, entry)
                kvm_gmem_invalidate_end(gmem, start, end);

        filemap_invalidate_unlock(inode->i_mapping);

        return 0;
}

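/*
 * Preallocate folios for [offset, offset + len).  Takes the invalidate
 * lock in shared mode: allocations may run concurrently with each other,
 * but not with truncation.
 */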
static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
{
        struct address_space *mapping = inode->i_mapping;
        pgoff_t start, index, end;
        int r;

        /* The file size is fixed at creation; fallocate() cannot grow the file. */
        if (offset + len > i_size_read(inode))
                return -EINVAL;

        filemap_invalidate_lock_shared(mapping);

        start = offset >> PAGE_SHIFT;
        end = (offset + len) >> PAGE_SHIFT;

        r = 0;
        for (index = start; index < end; ) {
                struct folio *folio;

                if (signal_pending(current)) {
                        r = -EINTR;
                        break;
                }

                folio = kvm_gmem_get_folio(inode, index);
                if (IS_ERR(folio)) {
                        r = PTR_ERR(folio);
                        break;
                }

                index = folio_next_index(folio);

                folio_unlock(folio);
                folio_put(folio);

                /* 64-bit only, wrapping the index should be impossible. */
                if (WARN_ON_ONCE(!index))
                        break;

                cond_resched();
        }

        filemap_invalidate_unlock_shared(mapping);

        return r;
}

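/*
 * fallocate() is the only way to allocate or deallocate guest_memfd
 * memory: the default mode (which must include FALLOC_FL_KEEP_SIZE)
 * preallocates, FALLOC_FL_PUNCH_HOLE deallocates.
 */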
static long kvm_gmem_fallocate(struct file *file, int mode, loff_t offset,
                               loff_t len)
{
        int ret;

        if (!(mode & FALLOC_FL_KEEP_SIZE))
                return -EOPNOTSUPP;

        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;

        if (!PAGE_ALIGNED(offset) || !PAGE_ALIGNED(len))
                return -EINVAL;

        if (mode & FALLOC_FL_PUNCH_HOLE)
                ret = kvm_gmem_punch_hole(file_inode(file), offset, len);
        else
                ret = kvm_gmem_allocate(file_inode(file), offset, len);

        if (!ret)
                file_modified(file);
        return ret;
}

static int kvm_gmem_release(struct inode *inode, struct file *file)
{
        struct kvm_gmem *gmem = file->private_data;
        struct kvm_memory_slot *slot;
        struct kvm *kvm = gmem->kvm;
        unsigned long index;

        /*
         * Prevent concurrent attempts to *unbind* a memslot.  This is the last
         * reference to the file and thus no new bindings can be created, but
         * dereferencing the slot for existing bindings needs to be protected
         * against memslot updates, specifically so that unbind doesn't race
         * and free the memslot (kvm_gmem_get_file() will return NULL).
         */
        mutex_lock(&kvm->slots_lock);

        filemap_invalidate_lock(inode->i_mapping);

        xa_for_each(&gmem->bindings, index, slot)
                rcu_assign_pointer(slot->gmem.file, NULL);

        synchronize_rcu();

        /*
         * All in-flight operations are gone and new bindings can be created.
         * Zap all SPTEs pointed at by this file.  Do not free the backing
         * memory, as its lifetime is associated with the inode, not the file.
         */
        kvm_gmem_invalidate_begin(gmem, 0, -1ul);
        kvm_gmem_invalidate_end(gmem, 0, -1ul);

        list_del(&gmem->entry);

        filemap_invalidate_unlock(inode->i_mapping);

        mutex_unlock(&kvm->slots_lock);

        xa_destroy(&gmem->bindings);
        kfree(gmem);

        kvm_put_kvm(kvm);

        return 0;
}

static inline struct file *kvm_gmem_get_file(struct kvm_memory_slot *slot)
{
        /*
         * Do not return slot->gmem.file if it has already been closed;
         * there might be some time between the last fput() and when
         * kvm_gmem_release() clears slot->gmem.file, and we do not
         * want to spin in the meantime.
         */
        return get_file_active(&slot->gmem.file);
}

static struct file_operations kvm_gmem_fops = {
        .open           = generic_file_open,
        .release        = kvm_gmem_release,
        .fallocate      = kvm_gmem_fallocate,
};

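/*
 * kvm_gmem_fops cannot be const: .owner is only known at runtime, when
 * the KVM module registers itself here so that it cannot be unloaded
 * while guest_memfd files are open.
 */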
void kvm_gmem_init(struct module *module)
{
        kvm_gmem_fops.owner = module;
}

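/*
 * guest_memfd folios are unmovable, so migration should never be
 * attempted; warn and fail if it is.
 */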
static int kvm_gmem_migrate_folio(struct address_space *mapping,
                                  struct folio *dst, struct folio *src,
                                  enum migrate_mode mode)
{
        WARN_ON_ONCE(1);
        return -EINVAL;
}

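/*
 * Handle a hardware memory failure (poisoning) in the file: unmap the
 * affected range from the guest, but keep the poisoned folio in place
 * so that userspace can decide how to react.
 */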
static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *folio)
{
        struct list_head *gmem_list = &mapping->i_private_list;
        struct kvm_gmem *gmem;
        pgoff_t start, end;

        filemap_invalidate_lock_shared(mapping);

        start = folio->index;
        end = start + folio_nr_pages(folio);

        list_for_each_entry(gmem, gmem_list, entry)
                kvm_gmem_invalidate_begin(gmem, start, end);

        /*
         * Do not truncate the range, what action is taken in response to the
         * error is userspace's decision (assuming the architecture supports
         * gracefully handling memory errors).  If/when the guest attempts to
         * access a poisoned page, kvm_gmem_get_pfn() will return -EHWPOISON,
         * at which point KVM can either terminate the VM or propagate the
         * error to userspace.
         */

        list_for_each_entry(gmem, gmem_list, entry)
                kvm_gmem_invalidate_end(gmem, start, end);

        filemap_invalidate_unlock_shared(mapping);

        return MF_DELAYED;
}

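/*
 * If the architecture implements it, give it a chance to invalidate the
 * pfn range before the folio is freed back to the host, e.g. so that
 * SEV-SNP can reclaim the pages from the RMP.
 */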
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
static void kvm_gmem_free_folio(struct folio *folio)
{
        struct page *page = folio_page(folio, 0);
        kvm_pfn_t pfn = page_to_pfn(page);
        int order = folio_order(folio);

        kvm_arch_gmem_invalidate(pfn, pfn + (1ul << order));
}
#endif

static const struct address_space_operations kvm_gmem_aops = {
        .dirty_folio = noop_dirty_folio,
        .migrate_folio  = kvm_gmem_migrate_folio,
        .error_remove_folio = kvm_gmem_error_folio,
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
        .free_folio = kvm_gmem_free_folio,
#endif
};

static int kvm_gmem_getattr(struct mnt_idmap *idmap, const struct path *path,
                            struct kstat *stat, u32 request_mask,
                            unsigned int query_flags)
{
        struct inode *inode = path->dentry->d_inode;

        generic_fillattr(idmap, request_mask, inode, stat);
        return 0;
}

static int kvm_gmem_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
                            struct iattr *attr)
{
        return -EINVAL;
}

static const struct inode_operations kvm_gmem_iops = {
        .getattr        = kvm_gmem_getattr,
        .setattr        = kvm_gmem_setattr,
};

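/*
 * Create the anonymous file backing a guest_memfd: allocate an fd and a
 * struct kvm_gmem, size the inode to @size, stash @flags in i_private,
 * and pin the VM for the lifetime of the file.
 */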
static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
{
        const char *anon_name = "[kvm-gmem]";
        struct kvm_gmem *gmem;
        struct inode *inode;
        struct file *file;
        int fd, err;

        fd = get_unused_fd_flags(0);
        if (fd < 0)
                return fd;

        gmem = kzalloc(sizeof(*gmem), GFP_KERNEL);
        if (!gmem) {
                err = -ENOMEM;
                goto err_fd;
        }

        file = anon_inode_create_getfile(anon_name, &kvm_gmem_fops, gmem,
                                         O_RDWR, NULL);
        if (IS_ERR(file)) {
                err = PTR_ERR(file);
                goto err_gmem;
        }

        file->f_flags |= O_LARGEFILE;

        inode = file->f_inode;
        WARN_ON(file->f_mapping != inode->i_mapping);

        inode->i_private = (void *)(unsigned long)flags;
        inode->i_op = &kvm_gmem_iops;
        inode->i_mapping->a_ops = &kvm_gmem_aops;
        inode->i_mode |= S_IFREG;
        inode->i_size = size;
        mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
        mapping_set_inaccessible(inode->i_mapping);
        /* Unmovable mappings are supposed to be marked unevictable as well. */
        WARN_ON_ONCE(!mapping_unevictable(inode->i_mapping));

        kvm_get_kvm(kvm);
        gmem->kvm = kvm;
        xa_init(&gmem->bindings);
        list_add(&gmem->entry, &inode->i_mapping->i_private_list);

        fd_install(fd, file);
        return fd;

err_gmem:
        kfree(gmem);
err_fd:
        put_unused_fd(fd);
        return err;
}

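/*
 * Handle the KVM_CREATE_GUEST_MEMFD ioctl: validate the arguments and
 * create the backing file.  A hypothetical userspace sketch (not part
 * of this file):
 *
 *        struct kvm_create_guest_memfd args = {
 *                .size  = vm_memory_bytes,  // must be page-aligned, > 0
 *                .flags = 0,                // no flags are defined here
 *        };
 *        int gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &args);
 */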
int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
{
        loff_t size = args->size;
        u64 flags = args->flags;
        u64 valid_flags = 0;

        if (flags & ~valid_flags)
                return -EINVAL;

        if (size <= 0 || !PAGE_ALIGNED(size))
                return -EINVAL;

        return __kvm_gmem_create(kvm, size, flags);
}

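/*
 * Bind @slot to the guest_memfd range starting at @offset in @fd, as
 * requested via KVM_SET_USER_MEMORY_REGION2 with KVM_MEM_GUEST_MEMFD.
 * A given range of the file can be bound to at most one memslot at a
 * time, and the bound range must lie entirely within the file.
 */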
int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
                  unsigned int fd, loff_t offset)
{
        loff_t size = slot->npages << PAGE_SHIFT;
        unsigned long start, end;
        struct kvm_gmem *gmem;
        struct inode *inode;
        struct file *file;
        int r = -EINVAL;

        BUILD_BUG_ON(sizeof(gfn_t) != sizeof(slot->gmem.pgoff));

        file = fget(fd);
        if (!file)
                return -EBADF;

        if (file->f_op != &kvm_gmem_fops)
                goto err;

        gmem = file->private_data;
        if (gmem->kvm != kvm)
                goto err;

        inode = file_inode(file);

        if (offset < 0 || !PAGE_ALIGNED(offset) ||
            offset + size > i_size_read(inode))
                goto err;

        filemap_invalidate_lock(inode->i_mapping);

        start = offset >> PAGE_SHIFT;
        end = start + slot->npages;

        if (!xa_empty(&gmem->bindings) &&
            xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
                filemap_invalidate_unlock(inode->i_mapping);
                goto err;
        }

        /*
         * No synchronize_rcu() needed, any in-flight readers are guaranteed
         * to see either a NULL file or this new file; no need for them to go
         * away.
         */
        rcu_assign_pointer(slot->gmem.file, file);
        slot->gmem.pgoff = start;

        xa_store_range(&gmem->bindings, start, end - 1, slot, GFP_KERNEL);
        filemap_invalidate_unlock(inode->i_mapping);

        /*
         * Drop the reference to the file, even on success.  The file pins KVM,
         * not the other way 'round.  Active bindings are invalidated if the
         * file is closed before memslots are destroyed.
         */
        r = 0;
err:
        fput(file);
        return r;
}

void kvm_gmem_unbind(struct kvm_memory_slot *slot)
{
        unsigned long start = slot->gmem.pgoff;
        unsigned long end = start + slot->npages;
        struct kvm_gmem *gmem;
        struct file *file;

        /*
         * Nothing to do if the underlying file was already closed (or is
         * being closed right now); kvm_gmem_release() invalidates all
         * bindings.
         */
        file = kvm_gmem_get_file(slot);
        if (!file)
                return;

        gmem = file->private_data;

        filemap_invalidate_lock(file->f_mapping);
        xa_store_range(&gmem->bindings, start, end - 1, NULL, GFP_KERNEL);
        rcu_assign_pointer(slot->gmem.file, NULL);
        synchronize_rcu();
        filemap_invalidate_unlock(file->f_mapping);

        fput(file);
}

/* Returns a locked folio on success.  */
static struct folio *
__kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
                   gfn_t gfn, kvm_pfn_t *pfn, bool *is_prepared,
                   int *max_order)
{
        pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
        struct kvm_gmem *gmem = file->private_data;
        struct folio *folio;

        if (file != slot->gmem.file) {
                WARN_ON_ONCE(slot->gmem.file);
                return ERR_PTR(-EFAULT);
        }

        if (xa_load(&gmem->bindings, index) != slot) {
                WARN_ON_ONCE(xa_load(&gmem->bindings, index));
                return ERR_PTR(-EIO);
        }

        folio = kvm_gmem_get_folio(file_inode(file), index);
        if (IS_ERR(folio))
                return folio;

        if (folio_test_hwpoison(folio)) {
                folio_unlock(folio);
                folio_put(folio);
                return ERR_PTR(-EHWPOISON);
        }

        *pfn = folio_file_pfn(folio, index);
        if (max_order)
                *max_order = 0;

        *is_prepared = folio_test_uptodate(folio);
        return folio;
}

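/*
 * Get a pfn for @gfn from the memslot's guest_memfd file, preparing the
 * backing folio if it has not been prepared yet.  On success (return 0)
 * the folio is unlocked and its refcount stays elevated; the caller must
 * eventually drop that reference.
 */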
int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
                     gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
{
        struct file *file = kvm_gmem_get_file(slot);
        struct folio *folio;
        bool is_prepared = false;
        int r = 0;

        if (!file)
                return -EFAULT;

        folio = __kvm_gmem_get_pfn(file, slot, gfn, pfn, &is_prepared, max_order);
        if (IS_ERR(folio)) {
                r = PTR_ERR(folio);
                goto out;
        }

        if (!is_prepared)
                r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);

        folio_unlock(folio);
        if (r < 0)
                folio_put(folio);

out:
        fput(file);
        return r;
}
EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn);

#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
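/*
 * Populate guest memory in [start_gfn, start_gfn + npages), invoking
 * @post_populate on each chunk and copying from @src if it is non-NULL;
 * architectures use this e.g. to encrypt and measure the initial payload
 * of a SEV-SNP guest.  The caller must hold slots_lock.
 */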
long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
                       kvm_gmem_populate_cb post_populate, void *opaque)
{
        struct file *file;
        struct kvm_memory_slot *slot;
        void __user *p;

        int ret = 0, max_order;
        long i;

        lockdep_assert_held(&kvm->slots_lock);
        if (npages < 0)
                return -EINVAL;

        slot = gfn_to_memslot(kvm, start_gfn);
        if (!kvm_slot_can_be_private(slot))
                return -EINVAL;

        file = kvm_gmem_get_file(slot);
        if (!file)
                return -EFAULT;

        filemap_invalidate_lock(file->f_mapping);

        npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
        for (i = 0; i < npages; i += (1 << max_order)) {
                struct folio *folio;
                gfn_t gfn = start_gfn + i;
                bool is_prepared = false;
                kvm_pfn_t pfn;

                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                folio = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &is_prepared, &max_order);
                if (IS_ERR(folio)) {
                        ret = PTR_ERR(folio);
                        break;
                }

                if (is_prepared) {
                        folio_unlock(folio);
                        folio_put(folio);
                        ret = -EEXIST;
                        break;
                }

                folio_unlock(folio);
                WARN_ON(!IS_ALIGNED(gfn, 1 << max_order) ||
                        (npages - i) < (1 << max_order));

                ret = -EINVAL;
                while (!kvm_range_has_memory_attributes(kvm, gfn, gfn + (1 << max_order),
                                                        KVM_MEMORY_ATTRIBUTE_PRIVATE,
                                                        KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
                        if (!max_order)
                                goto put_folio_and_exit;
                        max_order--;
                }

                p = src ? src + i * PAGE_SIZE : NULL;
                ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
                if (!ret)
                        kvm_gmem_mark_prepared(folio);

put_folio_and_exit:
                folio_put(folio);
                if (ret)
                        break;
        }

        filemap_invalidate_unlock(file->f_mapping);

        fput(file);
        return ret && !i ? ret : i;
}
EXPORT_SYMBOL_GPL(kvm_gmem_populate);
#endif
