
TOMOYO Linux Cross Reference
Linux/fs/netfs/iterator.c


// SPDX-License-Identifier: GPL-2.0-or-later
/* Iterator helpers.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/scatterlist.h>
#include <linux/netfs.h>
#include "internal.h"

/**
 * netfs_extract_user_iter - Extract the pages from a user iterator into a bvec
 * @orig: The original iterator
 * @orig_len: The amount of iterator to copy
 * @new: The iterator to be set up
 * @extraction_flags: Flags to qualify the request
 *
 * Extract the page fragments from the given amount of the source iterator and
 * build up a second iterator that refers to all of those bits.  This allows
 * the original iterator to be disposed of.
 *
 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
 * be allowed on the pages extracted.
 *
 * On success, the number of elements in the bvec is returned and the original
 * iterator will have been advanced by the amount extracted.
 *
 * The iov_iter_extract_will_pin() function should be used to query how
 * cleanup should be performed.
 */
ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
                                struct iov_iter *new,
                                iov_iter_extraction_t extraction_flags)
{
        struct bio_vec *bv = NULL;
        struct page **pages;
        unsigned int cur_npages;
        unsigned int max_pages;
        unsigned int npages = 0;
        unsigned int i;
        ssize_t ret;
        size_t count = orig_len, offset, len;
        size_t bv_size, pg_size;

        if (WARN_ON_ONCE(!iter_is_ubuf(orig) && !iter_is_iovec(orig)))
                return -EIO;

        max_pages = iov_iter_npages(orig, INT_MAX);
        bv_size = array_size(max_pages, sizeof(*bv));
        bv = kvmalloc(bv_size, GFP_KERNEL);
        if (!bv)
                return -ENOMEM;

        /* Put the page list at the end of the bvec list storage.  bvec
         * elements are larger than page pointers, so as long as we work
         * 0->last, we should be fine.
         */
        pg_size = array_size(max_pages, sizeof(*pages));
        pages = (void *)bv + bv_size - pg_size;

        while (count && npages < max_pages) {
                ret = iov_iter_extract_pages(orig, &pages, count,
                                             max_pages - npages, extraction_flags,
                                             &offset);
                if (ret < 0) {
                        pr_err("Couldn't get user pages (rc=%zd)\n", ret);
                        break;
                }

                if (ret > count) {
                        pr_err("get_pages rc=%zd more than %zu\n", ret, count);
                        break;
                }

                count -= ret;
                ret += offset;
                cur_npages = DIV_ROUND_UP(ret, PAGE_SIZE);

                if (npages + cur_npages > max_pages) {
                        pr_err("Out of bvec array capacity (%u vs %u)\n",
                               npages + cur_npages, max_pages);
                        break;
                }

                for (i = 0; i < cur_npages; i++) {
                        len = ret > PAGE_SIZE ? PAGE_SIZE : ret;
                        bvec_set_page(bv + npages + i, *pages++, len - offset, offset);
                        ret -= len;
                        offset = 0;
                }

                npages += cur_npages;
        }

        iov_iter_bvec(new, orig->data_source, bv, npages, orig_len - count);
        return npages;
}
EXPORT_SYMBOL_GPL(netfs_extract_user_iter);
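
/*
 * A rough caller-side sketch (hypothetical, not part of the original file):
 * extract a user buffer into a bvec iterator, hand it to a transport, then
 * unpin the pages and free the bvec array afterwards.  It relies only on the
 * headers already included above, assumes the mainline behaviour that
 * extraction pins user-backed pages, and example_issue_io() is merely a
 * placeholder for the real I/O step.
 */
static int example_write_user_buf(void __user *buf, size_t len)
{
        struct iov_iter source, extracted;
        struct bio_vec *bv;
        ssize_t nbv;
        int i;

        iov_iter_ubuf(&source, ITER_SOURCE, buf, len);

        nbv = netfs_extract_user_iter(&source, len, &extracted, 0);
        if (nbv < 0)
                return nbv;

        /* ... pass &extracted to the transport, e.g. example_issue_io() ... */

        /* Cleanup: unpin the extracted pages and free the bvec array. */
        bv = (struct bio_vec *)extracted.bvec;
        for (i = 0; i < nbv; i++)
                unpin_user_page(bv[i].bv_page);
        kvfree(bv);
        return 0;
}

/*
 * Note the allocation trick in netfs_extract_user_iter() above: a struct
 * bio_vec (16 bytes on a typical 64-bit build) is larger than a page pointer
 * (8 bytes), so the temporary page-pointer list can live in the tail of the
 * same kvmalloc()'d buffer and be safely overwritten from index 0 upwards as
 * the bvec entries are filled in.
 */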

/*
 * Select the span of a bvec iterator we're going to use.  Limit it by both
 * maximum size and maximum number of segments.  Returns the size of the span
 * in bytes.
 */
static size_t netfs_limit_bvec(const struct iov_iter *iter, size_t start_offset,
                               size_t max_size, size_t max_segs)
{
        const struct bio_vec *bvecs = iter->bvec;
        unsigned int nbv = iter->nr_segs, ix = 0, nsegs = 0;
        size_t len, span = 0, n = iter->count;
        size_t skip = iter->iov_offset + start_offset;

        if (WARN_ON(!iov_iter_is_bvec(iter)) ||
            WARN_ON(start_offset > n) ||
            n == 0)
                return 0;

        while (n && ix < nbv && skip) {
                len = bvecs[ix].bv_len;
                if (skip < len)
                        break;
                skip -= len;
                n -= len;
                ix++;
        }

        while (n && ix < nbv) {
                len = min3(n, bvecs[ix].bv_len - skip, max_size);
                span += len;
                nsegs++;
                ix++;
                if (span >= max_size || nsegs >= max_segs)
                        break;
                skip = 0;
                n -= len;
        }

        return min(span, max_size);
}
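
/*
 * A hypothetical illustration (not part of the original file) of how the
 * limiter clamps a bvec iterator: three segments of 1KiB, 4KiB and 4KiB are
 * limited to at most 8KiB and two segments, via the exported
 * netfs_limit_iter() entry point defined further down.  p0/p1/p2 are pages
 * the caller is assumed to hold already.
 */
static size_t example_limit_bvec(struct page *p0, struct page *p1,
                                 struct page *p2)
{
        struct bio_vec bv[3];
        struct iov_iter iter;

        bvec_set_page(&bv[0], p0, 1024, 0);
        bvec_set_page(&bv[1], p1, 4096, 0);
        bvec_set_page(&bv[2], p2, 4096, 0);
        iov_iter_bvec(&iter, ITER_SOURCE, bv, 3, 9 * 1024);

        /* The byte budget would allow 8KiB, but the segment cap bites
         * first: 1KiB from bv[0] plus 4KiB from bv[1] gives a span of
         * 5120 bytes.
         */
        return netfs_limit_iter(&iter, 0, 8 * 1024, 2);
}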

/*
 * Select the span of an xarray iterator we're going to use.  Limit it by both
 * maximum size and maximum number of segments.  It is assumed that segments
 * can be larger than a page in size, provided they're physically contiguous.
 * Returns the size of the span in bytes.
 */
static size_t netfs_limit_xarray(const struct iov_iter *iter, size_t start_offset,
                                 size_t max_size, size_t max_segs)
{
        struct folio *folio;
        unsigned int nsegs = 0;
        loff_t pos = iter->xarray_start + iter->iov_offset;
        pgoff_t index = pos / PAGE_SIZE;
        size_t span = 0, n = iter->count;

        XA_STATE(xas, iter->xarray, index);

        if (WARN_ON(!iov_iter_is_xarray(iter)) ||
            WARN_ON(start_offset > n) ||
            n == 0)
                return 0;
        max_size = min(max_size, n - start_offset);

        rcu_read_lock();
        xas_for_each(&xas, folio, ULONG_MAX) {
                size_t offset, flen, len;
                if (xas_retry(&xas, folio))
                        continue;
                if (WARN_ON(xa_is_value(folio)))
                        break;
                if (WARN_ON(folio_test_hugetlb(folio)))
                        break;

                flen = folio_size(folio);
                offset = offset_in_folio(folio, pos);
                len = min(max_size, flen - offset);
                span += len;
                nsegs++;
                if (span >= max_size || nsegs >= max_segs)
                        break;
        }

        rcu_read_unlock();
        return min(span, max_size);
}
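
/*
 * A hypothetical caller-side sketch (not part of the original file): an
 * ITER_XARRAY iterator typically covers part of a file's page cache, so each
 * segment counted by the limiter is one folio and may span several pages.
 * struct address_space is assumed to be available via <linux/fs.h>.
 */
static size_t example_limit_pagecache(struct address_space *mapping,
                                      loff_t pos, size_t count,
                                      size_t rsize, size_t max_segs)
{
        struct iov_iter iter;

        /* Describe @count bytes of the mapping starting at @pos. */
        iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, count);

        /* How much of that fits in one read of at most @rsize bytes and
         * @max_segs folios?
         */
        return netfs_limit_iter(&iter, 0, rsize, max_segs);
}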

size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
                        size_t max_size, size_t max_segs)
{
        if (iov_iter_is_bvec(iter))
                return netfs_limit_bvec(iter, start_offset, max_size, max_segs);
        if (iov_iter_is_xarray(iter))
                return netfs_limit_xarray(iter, start_offset, max_size, max_segs);
        BUG();
}
EXPORT_SYMBOL(netfs_limit_iter);
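
/*
 * netfs_limit_iter() above dispatches on the iterator type and BUG()s on
 * anything other than a bvec or xarray iterator.  A typical consumption
 * pattern, sketched hypothetically below (not part of the original file), is
 * to carve an iterator into transport-sized subrequests, advancing past each
 * span the limiter returns.
 */
static void example_split_into_subrequests(struct iov_iter *iter,
                                           size_t wsize, size_t max_segs)
{
        while (iov_iter_count(iter)) {
                size_t part = netfs_limit_iter(iter, 0, wsize, max_segs);

                if (!part)
                        break;
                /* A subrequest would typically work on a copy of *iter
                 * truncated to @part bytes (e.g. with iov_iter_truncate()),
                 * after which the source iterator is advanced past it.
                 */
                iov_iter_advance(iter, part);
        }
}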
