~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/fs/netfs/read_retry.c

Version: ~ [ linux-6.12-rc7 ] ~ [ linux-6.11.7 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.60 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.116 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.171 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.229 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.285 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.323 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.12 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

Diff markup

Differences between /fs/netfs/read_retry.c (Architecture sparc) and /fs/netfs/read_retry.c (Architecture sparc64)


  1 // SPDX-License-Identifier: GPL-2.0-only            1 // SPDX-License-Identifier: GPL-2.0-only
  2 /* Network filesystem read subrequest retrying      2 /* Network filesystem read subrequest retrying.
  3  *                                                  3  *
  4  * Copyright (C) 2024 Red Hat, Inc. All Rights      4  * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
  5  * Written by David Howells (dhowells@redhat.c      5  * Written by David Howells (dhowells@redhat.com)
  6  */                                                 6  */
  7                                                     7 
  8 #include <linux/fs.h>                               8 #include <linux/fs.h>
  9 #include <linux/slab.h>                             9 #include <linux/slab.h>
 10 #include "internal.h"                              10 #include "internal.h"
 11                                                    11 
 12 static void netfs_reissue_read(struct netfs_io     12 static void netfs_reissue_read(struct netfs_io_request *rreq,
 13                                struct netfs_io     13                                struct netfs_io_subrequest *subreq)
 14 {                                                  14 {
 15         struct iov_iter *io_iter = &subreq->io     15         struct iov_iter *io_iter = &subreq->io_iter;
 16                                                    16 
 17         if (iov_iter_is_folioq(io_iter)) {         17         if (iov_iter_is_folioq(io_iter)) {
 18                 subreq->curr_folioq = (struct      18                 subreq->curr_folioq = (struct folio_queue *)io_iter->folioq;
 19                 subreq->curr_folioq_slot = io_     19                 subreq->curr_folioq_slot = io_iter->folioq_slot;
 20                 subreq->curr_folio_order = sub     20                 subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot];
 21         }                                          21         }
 22                                                    22 
 23         atomic_inc(&rreq->nr_outstanding);         23         atomic_inc(&rreq->nr_outstanding);
 24         __set_bit(NETFS_SREQ_IN_PROGRESS, &sub     24         __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
 25         netfs_get_subrequest(subreq, netfs_sre     25         netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
 26         subreq->rreq->netfs_ops->issue_read(su     26         subreq->rreq->netfs_ops->issue_read(subreq);
 27 }                                                  27 }
 28                                                    28 
/*
 * Go through the list of failed/short reads, retrying all retryable ones.  We
 * need to switch failed cache reads to network downloads.
 */
static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream0 = &rreq->io_streams[0];
	LIST_HEAD(sublist);
	LIST_HEAD(queue);

	_enter("R=%x", rreq->debug_id);

	if (list_empty(&rreq->subrequests))
		return;

	/* Give the filesystem a chance to adjust its state (if it provides a
	 * hook) before we start reissuing.
	 */
	if (rreq->netfs_ops->retry_request)
		rreq->netfs_ops->retry_request(rreq, NULL);

	/* If there's no renegotiation to do, just resend each retryable subreq
	 * up to the first permanently failed one.
	 */
	if (!rreq->netfs_ops->prepare_read &&
	    !test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags)) {
		struct netfs_io_subrequest *subreq;

		list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
				break;
			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
				netfs_reset_iter(subreq);
				netfs_reissue_read(rreq, subreq);
			}
		}
		return;
	}

	/* Okay, we need to renegotiate all the download requests and flip any
	 * failed cache reads over to being download requests and negotiate
	 * those also.  All fully successful subreqs have been removed from the
	 * list and any spare data from those has been donated.
	 *
	 * What we do is decant the list and rebuild it one subreq at a time so
	 * that we don't end up with donations jumping over a gap we're busy
	 * populating with smaller subrequests.  In the event that the subreq
	 * we just launched finishes before we insert the next subreq, it'll
	 * fill in rreq->prev_donated instead.
	 *
	 * Note: Alternatively, we could split the tail subrequest right before
	 * we reissue it and fix up the donations under lock.
	 */
	list_splice_init(&rreq->subrequests, &queue);

	do {
		struct netfs_io_subrequest *from;
		struct iov_iter source;
		unsigned long long start, len;
		size_t part, deferred_next_donated = 0;
		bool boundary = false;

		/* Go through the subreqs and find the next span of contiguous
		 * buffer that we then rejig (cifs, for example, needs the
		 * rsize renegotiating) and reissue.
		 */
		from = list_first_entry(&queue, struct netfs_io_subrequest, rreq_link);
		list_move_tail(&from->rreq_link, &sublist);
		/* The span starts after whatever part of "from" already
		 * transferred successfully.
		 */
		start = from->start + from->transferred;
		len   = from->len   - from->transferred;

		_debug("from R=%08x[%x] s=%llx ctl=%zx/%zx/%zx",
		       rreq->debug_id, from->debug_index,
		       from->start, from->consumed, from->transferred, from->len);

		if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
			goto abandon;

		/* Extend the span over immediately-following subreqs that are
		 * contiguous, unstarted and retryable, stopping after a
		 * boundary subreq.  Track the trailing donation so it can be
		 * reattached to whichever subreq ends up covering the tail.
		 */
		deferred_next_donated = from->next_donated;
		while ((subreq = list_first_entry_or_null(
				&queue, struct netfs_io_subrequest, rreq_link))) {
			if (subreq->start != start + len ||
			    subreq->transferred > 0 ||
			    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
				break;
			list_move_tail(&subreq->rreq_link, &sublist);
			len += subreq->len;
			deferred_next_donated = subreq->next_donated;
			if (test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags))
				break;
		}

		_debug(" - range: %llx-%llx %llx", start, start + len - 1, len);

		/* Determine the set of buffers we're going to use.  Each
		 * subreq gets a subset of a single overall contiguous buffer.
		 */
		netfs_reset_iter(from);
		source = from->io_iter;
		source.count = len;

		/* Work through the sublist. */
		while ((subreq = list_first_entry_or_null(
				&sublist, struct netfs_io_subrequest, rreq_link))) {
			list_del(&subreq->rreq_link);

			/* Failed cache reads are flipped to downloads here. */
			subreq->source  = NETFS_DOWNLOAD_FROM_SERVER;
			subreq->start   = start - subreq->transferred;
			subreq->len     = len   + subreq->transferred;
			stream0->sreq_max_len = subreq->len;

			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);

			/* Requeue under the lock, absorbing any donation that
			 * arrived from the previously launched subreq while we
			 * were off the list.
			 */
			spin_lock_bh(&rreq->lock);
			list_add_tail(&subreq->rreq_link, &rreq->subrequests);
			subreq->prev_donated += rreq->prev_donated;
			rreq->prev_donated = 0;
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
			spin_unlock_bh(&rreq->lock);

			BUG_ON(!len);

			/* Renegotiate max_len (rsize) */
			if (rreq->netfs_ops->prepare_read(subreq) < 0) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
				__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
			}

			/* Slice off as much of the span as the renegotiated
			 * limits allow and give that slice of the source
			 * iterator to this subreq.
			 */
			part = umin(len, stream0->sreq_max_len);
			if (unlikely(rreq->io_streams[0].sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream0->sreq_max_segs);
			subreq->len = subreq->transferred + part;
			subreq->io_iter = source;
			iov_iter_truncate(&subreq->io_iter, part);
			iov_iter_advance(&source, part);
			len -= part;
			start += part;
			if (!len) {
				/* Last slice of the span: restore the boundary
				 * flag and the deferred trailing donation.
				 */
				if (boundary)
					__set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
				subreq->next_donated = deferred_next_donated;
			} else {
				__clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
				subreq->next_donated = 0;
			}

			netfs_reissue_read(rreq, subreq);
			if (!len)
				break;

			/* If we ran out of subrequests, allocate another. */
			if (list_empty(&sublist)) {
				subreq = netfs_alloc_subrequest(rreq);
				if (!subreq)
					goto abandon;
				subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
				subreq->start = start;

				/* We get two refs, but need just one. */
				netfs_put_subrequest(subreq, false, netfs_sreq_trace_new);
				trace_netfs_sreq(subreq, netfs_sreq_trace_split);
				list_add_tail(&subreq->rreq_link, &sublist);
			}
		}

		/* If we managed to use fewer subreqs, we can discard the
		 * excess.
		 */
		while ((subreq = list_first_entry_or_null(
				&sublist, struct netfs_io_subrequest, rreq_link))) {
			trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
			list_del(&subreq->rreq_link);
			netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
		}

	} while (!list_empty(&queue));

	return;

	/* If we hit ENOMEM, fail all remaining subrequests */
abandon:
	list_splice_init(&sublist, &queue);
	list_for_each_entry(subreq, &queue, rreq_link) {
		if (!subreq->error)
			subreq->error = -ENOMEM;
		__clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
		__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
		__clear_bit(NETFS_SREQ_RETRYING, &subreq->flags);
	}
	/* Put the failed subreqs back on the request under lock so that
	 * completion processing can see and collect them.
	 */
	spin_lock_bh(&rreq->lock);
	list_splice_tail_init(&queue, &rreq->subrequests);
	spin_unlock_bh(&rreq->lock);
}
222                                                   222 
/*
 * Retry reads.
 */
void netfs_retry_reads(struct netfs_io_request *rreq)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

	/* Hold an extra count on nr_outstanding so the request cannot be seen
	 * as complete while subrequests are being reissued.
	 */
	atomic_inc(&rreq->nr_outstanding);

	netfs_retry_read_subrequests(rreq);

	/* Drop our count; if every reissued subrequest has already completed,
	 * we are the last out and must signal termination ourselves.
	 */
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		netfs_rreq_terminated(rreq, false);
}
237                                                   237 
238 /*                                                238 /*
239  * Unlock any the pages that haven't been unlo    239  * Unlock any the pages that haven't been unlocked yet due to abandoned
240  * subrequests.                                   240  * subrequests.
241  */                                               241  */
242 void netfs_unlock_abandoned_read_pages(struct     242 void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq)
243 {                                                 243 {
244         struct folio_queue *p;                    244         struct folio_queue *p;
245                                                   245 
246         for (p = rreq->buffer; p; p = p->next)    246         for (p = rreq->buffer; p; p = p->next) {
247                 for (int slot = 0; slot < foli    247                 for (int slot = 0; slot < folioq_count(p); slot++) {
248                         struct folio *folio =     248                         struct folio *folio = folioq_folio(p, slot);
249                                                   249 
250                         if (folio && !folioq_i    250                         if (folio && !folioq_is_marked2(p, slot)) {
251                                 trace_netfs_fo    251                                 trace_netfs_folio(folio, netfs_folio_trace_abandon);
252                                 folio_unlock(f    252                                 folio_unlock(folio);
253                         }                         253                         }
254                 }                                 254                 }
255         }                                         255         }
256 }                                                 256 }
257                                                   257 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php