
TOMOYO Linux Cross Reference
Linux/fs/netfs/read_pgpriv2.c


Diff markup

Differences between /fs/netfs/read_pgpriv2.c (Architecture mips) and /fs/netfs/read_pgpriv2.c (Architecture alpha): the file is identical for both architectures, so the listing is shown once below.


// SPDX-License-Identifier: GPL-2.0-only
/* Read with PG_private_2 [DEPRECATED].
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2.  The
 * third mark in the folio queue is used to indicate that this folio needs
 * writing.
 */
void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
                                      struct netfs_io_request *rreq,
                                      struct folio_queue *folioq,
                                      int slot)
{
        struct folio *folio = folioq_folio(folioq, slot);

        trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
        folio_start_private_2(folio);
        folioq_mark3(folioq, slot);
}
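
/*
 * Illustrative note, not part of the upstream file: a minimal sketch of how
 * the read side is assumed to use this helper.  The exact call site is an
 * assumption, not a quote from the kernel:
 *
 *      // As each folio of a cacheable subrequest completes:
 *      netfs_pgpriv2_mark_copy_to_cache(subreq, rreq, folioq, slot);
 *      // Once the whole read has completed:
 *      netfs_pgpriv2_write_to_the_cache(rreq);
 */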

/*
 * [DEPRECATED] Cancel PG_private_2 on all marked folios in the event of an
 * unrecoverable error.
 */
static void netfs_pgpriv2_cancel(struct folio_queue *folioq)
{
        struct folio *folio;
        int slot;

        while (folioq) {
                if (!folioq->marks3) {
                        folioq = folioq->next;
                        continue;
                }

                slot = __ffs(folioq->marks3);
                folio = folioq_folio(folioq, slot);

                trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
                folio_end_private_2(folio);
                folioq_unmark3(folioq, slot);
        }
}
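
/*
 * Illustrative note, not part of the upstream file: marks3 is a bitmap with
 * one bit per folio slot, and __ffs() returns the index of the lowest set
 * bit, so the loop above drains the queue one marked slot at a time.  For
 * example:
 *
 *      unsigned long marks3 = 0x24;    // slots 2 and 5 marked
 *      int slot = __ffs(marks3);       // slot == 2
 *      folioq_unmark3(folioq, slot);   // marks3 becomes 0x20; slot 5 is next
 */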

/*
 * [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
 */
static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio *folio)
{
        struct netfs_io_stream *cache  = &wreq->io_streams[1];
        size_t fsize = folio_size(folio), flen = fsize;
        loff_t fpos = folio_pos(folio), i_size;
        bool to_eof = false;

        _enter("");

        /* netfs_perform_write() may shift i_size around the page or from out
         * of the page to beyond it, but cannot move i_size into or through the
         * page since we have it locked.
         */
        i_size = i_size_read(wreq->inode);

        if (fpos >= i_size) {
                /* mmap beyond eof. */
                _debug("beyond eof");
                folio_end_private_2(folio);
                return 0;
        }

        if (fpos + fsize > wreq->i_size)
                wreq->i_size = i_size;

        if (flen > i_size - fpos) {
                flen = i_size - fpos;
                to_eof = true;
        } else if (flen == i_size - fpos) {
                to_eof = true;
        }

        _debug("folio %zx %zx", flen, fsize);

        trace_netfs_folio(folio, netfs_folio_trace_store_copy);

        /* Attach the folio to the rolling buffer. */
        if (netfs_buffer_append_folio(wreq, folio, false) < 0)
                return -ENOMEM;

        cache->submit_extendable_to = fsize;
        cache->submit_off = 0;
        cache->submit_len = flen;

        /* Attach the folio to one or more subrequests.  For a big folio, we
         * could end up with thousands of subrequests if the wsize is small -
         * but we might need to wait during the creation of subrequests for
         * network resources (eg. SMB credits).
         */
        do {
                ssize_t part;

                wreq->io_iter.iov_offset = cache->submit_off;

                atomic64_set(&wreq->issued_to, fpos + cache->submit_off);
                cache->submit_extendable_to = fsize - cache->submit_off;
                part = netfs_advance_write(wreq, cache, fpos + cache->submit_off,
                                           cache->submit_len, to_eof);
                cache->submit_off += part;
                if (part > cache->submit_len)
                        cache->submit_len = 0;
                else
                        cache->submit_len -= part;
        } while (cache->submit_len > 0);

        wreq->io_iter.iov_offset = 0;
        iov_iter_advance(&wreq->io_iter, fsize);
        atomic64_set(&wreq->issued_to, fpos + fsize);

        if (flen < fsize)
                netfs_issue_write(wreq, cache);

        _leave(" = 0");
        return 0;
}
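
/*
 * Illustrative note, not part of the upstream file: a worked example of the
 * length clamping above.  Assume a 16KiB folio at file position 32768 with
 * i_size == 37888:
 *
 *      fsize = 16384, flen = 16384, fpos = 32768
 *      i_size - fpos = 5120, so flen is clamped to 5120 and to_eof = true
 *
 * Only 5120 bytes are submitted to the cache stream, and because
 * flen < fsize no later folio can extend this write contiguously, so the
 * pending write is flushed at once via netfs_issue_write().
 */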

/*
 * [DEPRECATED] Go through the buffer and write any folios that are marked with
 * the third mark to the cache.
 */
void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
{
        struct netfs_io_request *wreq;
        struct folio_queue *folioq;
        struct folio *folio;
        int error = 0;
        int slot = 0;

        _enter("");

        if (!fscache_resources_valid(&rreq->cache_resources))
                goto couldnt_start;

        /* Need the first folio to be able to set up the op. */
        for (folioq = rreq->buffer; folioq; folioq = folioq->next) {
                if (folioq->marks3) {
                        slot = __ffs(folioq->marks3);
                        break;
                }
        }
        if (!folioq)
                return;
        folio = folioq_folio(folioq, slot);

        wreq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
                                      NETFS_PGPRIV2_COPY_TO_CACHE);
        if (IS_ERR(wreq)) {
                kleave(" [create %ld]", PTR_ERR(wreq));
                goto couldnt_start;
        }

        trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
        netfs_stat(&netfs_n_wh_copy_to_cache);

        for (;;) {
                error = netfs_pgpriv2_copy_folio(wreq, folio);
                if (error < 0)
                        break;

                folioq_unmark3(folioq, slot);
                if (!folioq->marks3) {
                        folioq = folioq->next;
                        if (!folioq)
                                break;
                }

                slot = __ffs(folioq->marks3);
                folio = folioq_folio(folioq, slot);
        }

        netfs_issue_write(wreq, &wreq->io_streams[1]);
        smp_wmb(); /* Write lists before ALL_QUEUED. */
        set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

        netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
        _leave(" = %d", error);
couldnt_start:
        netfs_pgpriv2_cancel(rreq->buffer);
}
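
/*
 * Illustrative note, not part of the upstream file: the smp_wmb() above
 * publishes the subrequest lists before NETFS_RREQ_ALL_QUEUED becomes
 * visible.  The assumption is that the write collector, which checks
 * ALL_QUEUED to decide whether the queue can still grow, therefore never
 * treats an incompletely-populated list as final.
 */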

/*
 * [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
 * copying.
 */
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
{
        struct folio_queue *folioq = wreq->buffer;
        unsigned long long collected_to = wreq->collected_to;
        unsigned int slot = wreq->buffer_head_slot;
        bool made_progress = false;

        if (slot >= folioq_nr_slots(folioq)) {
                folioq = netfs_delete_buffer_head(wreq);
                slot = 0;
        }

        for (;;) {
                struct folio *folio;
                unsigned long long fpos, fend;
                size_t fsize, flen;

                folio = folioq_folio(folioq, slot);
                if (WARN_ONCE(!folio_test_private_2(folio),
                              "R=%08x: folio %lx is not marked private_2\n",
                              wreq->debug_id, folio->index))
                        trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);

                fpos = folio_pos(folio);
                fsize = folio_size(folio);
                flen = fsize;

                fend = min_t(unsigned long long, fpos + flen, wreq->i_size);

                trace_netfs_collect_folio(wreq, folio, fend, collected_to);

                /* Unlock any folio we've transferred all of. */
                if (collected_to < fend)
                        break;

                trace_netfs_folio(folio, netfs_folio_trace_end_copy);
                folio_end_private_2(folio);
                wreq->cleaned_to = fpos + fsize;
                made_progress = true;

                /* Clean up the head folioq.  If we clear an entire folioq, then
                 * we can get rid of it provided it's not also the tail folioq
                 * being filled by the issuer.
                 */
                folioq_clear(folioq, slot);
                slot++;
                if (slot >= folioq_nr_slots(folioq)) {
                        if (READ_ONCE(wreq->buffer_tail) == folioq)
                                break;
                        folioq = netfs_delete_buffer_head(wreq);
                        slot = 0;
                }

                if (fpos + fsize >= collected_to)
                        break;
        }

        wreq->buffer = folioq;
        wreq->buffer_head_slot = slot;
        return made_progress;
}
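
/*
 * Illustrative note, not part of the upstream file: a worked example of the
 * unlock loop.  Assume two 4KiB folios at positions 0 and 4096, i_size well
 * past both, and collected_to == 6144:
 *
 *      folio at 0:    fend = 4096 <= collected_to, so PG_private_2 is
 *                     cleared and the slot released (made_progress = true)
 *      folio at 4096: fend = 8192 > collected_to, so the loop breaks with
 *                     buffer_head_slot left pointing at this folio
 */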
