TOMOYO Linux Cross Reference
Linux/fs/netfs/read_retry.c


Diff markup

Differences between /fs/netfs/read_retry.c (Version linux-6.12-rc7) and /fs/netfs/read_retry.c (Version linux-6.11.7)


// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem read subrequest retrying
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include "internal.h"

static void netfs_reissue_read(struct netfs_io_request *rreq,
                               struct netfs_io_subrequest *subreq)
{
        struct iov_iter *io_iter = &subreq->io_iter;

        if (iov_iter_is_folioq(io_iter)) {
                subreq->curr_folioq = (struct folio_queue *)io_iter->folioq;
                subreq->curr_folioq_slot = io_iter->folioq_slot;
                subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot];
        }

        atomic_inc(&rreq->nr_outstanding);
        __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
        netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
        subreq->rreq->netfs_ops->issue_read(subreq);
}

/*
 * Go through the list of failed/short reads, retrying all retryable ones.  We
 * need to switch failed cache reads to network downloads.
 */
static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
{
        struct netfs_io_subrequest *subreq;
        struct netfs_io_stream *stream0 = &rreq->io_streams[0];
        LIST_HEAD(sublist);
        LIST_HEAD(queue);

        _enter("R=%x", rreq->debug_id);

        if (list_empty(&rreq->subrequests))
                return;

        if (rreq->netfs_ops->retry_request)
                rreq->netfs_ops->retry_request(rreq, NULL);

        /* If there's no renegotiation to do, just resend each retryable subreq
         * up to the first permanently failed one.
         */
        if (!rreq->netfs_ops->prepare_read &&
            !test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags)) {
                struct netfs_io_subrequest *subreq;

                list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                        if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
                                break;
                        if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
                                netfs_reset_iter(subreq);
                                netfs_reissue_read(rreq, subreq);
                        }
                }
                return;
        }

        /* Okay, we need to renegotiate all the download requests and flip any
         * failed cache reads over to being download requests and negotiate
         * those also.  All fully successful subreqs have been removed from the
         * list and any spare data from those has been donated.
         *
         * What we do is decant the list and rebuild it one subreq at a time so
         * that we don't end up with donations jumping over a gap we're busy
         * populating with smaller subrequests.  In the event that the subreq
         * we just launched finishes before we insert the next subreq, it'll
         * fill in rreq->prev_donated instead.
         *
         * Note: Alternatively, we could split the tail subrequest right before
         * we reissue it and fix up the donations under lock.
         */
        list_splice_init(&rreq->subrequests, &queue);

        do {
                struct netfs_io_subrequest *from;
                struct iov_iter source;
                unsigned long long start, len;
                size_t part, deferred_next_donated = 0;
                bool boundary = false;

                /* Go through the subreqs and find the next span of contiguous
                 * buffer that we then rejig (cifs, for example, needs the
                 * rsize renegotiating) and reissue.
                 */
                from = list_first_entry(&queue, struct netfs_io_subrequest, rreq_link);
                list_move_tail(&from->rreq_link, &sublist);
                start = from->start + from->transferred;
                len   = from->len   - from->transferred;

                _debug("from R=%08x[%x] s=%llx ctl=%zx/%zx/%zx",
                       rreq->debug_id, from->debug_index,
                       from->start, from->consumed, from->transferred, from->len);

                if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
                    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
                        goto abandon;

                deferred_next_donated = from->next_donated;
                while ((subreq = list_first_entry_or_null(
                                &queue, struct netfs_io_subrequest, rreq_link))) {
                        if (subreq->start != start + len ||
                            subreq->transferred > 0 ||
                            !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
                                break;
                        list_move_tail(&subreq->rreq_link, &sublist);
                        len += subreq->len;
                        deferred_next_donated = subreq->next_donated;
                        if (test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags))
                                break;
                }

                _debug(" - range: %llx-%llx %llx", start, start + len - 1, len);

                /* Determine the set of buffers we're going to use.  Each
                 * subreq gets a subset of a single overall contiguous buffer.
                 */
                netfs_reset_iter(from);
                source = from->io_iter;
                source.count = len;

                /* Work through the sublist. */
                while ((subreq = list_first_entry_or_null(
                                &sublist, struct netfs_io_subrequest, rreq_link))) {
                        list_del(&subreq->rreq_link);

                        subreq->source  = NETFS_DOWNLOAD_FROM_SERVER;
                        subreq->start   = start - subreq->transferred;
                        subreq->len     = len   + subreq->transferred;
                        stream0->sreq_max_len = subreq->len;

                        __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
                        __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);

                        spin_lock_bh(&rreq->lock);
                        list_add_tail(&subreq->rreq_link, &rreq->subrequests);
                        subreq->prev_donated += rreq->prev_donated;
                        rreq->prev_donated = 0;
                        trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
                        spin_unlock_bh(&rreq->lock);

                        BUG_ON(!len);

                        /* Renegotiate max_len (rsize) */
                        if (rreq->netfs_ops->prepare_read(subreq) < 0) {
                                trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
                                __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
                        }

                        part = umin(len, stream0->sreq_max_len);
                        if (unlikely(rreq->io_streams[0].sreq_max_segs))
                                part = netfs_limit_iter(&source, 0, part, rreq->io_streams[0].sreq_max_segs);
                        subreq->len = subreq->transferred + part;
                        subreq->io_iter = source;
                        iov_iter_truncate(&subreq->io_iter, part);
                        iov_iter_advance(&source, part);
                        len -= part;
                        start += part;
                        if (!len) {
                                if (boundary)
                                        __set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
                                subreq->next_donated = deferred_next_donated;
                        } else {
                                __clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
                                subreq->next_donated = 0;
                        }

                        netfs_reissue_read(rreq, subreq);
                        if (!len)
                                break;

                        /* If we ran out of subrequests, allocate another. */
                        if (list_empty(&sublist)) {
                                subreq = netfs_alloc_subrequest(rreq);
                                if (!subreq)
                                        goto abandon;
                                subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
                                subreq->start = start;

                                /* We get two refs, but need just one. */
                                netfs_put_subrequest(subreq, false, netfs_sreq_trace_new);
                                trace_netfs_sreq(subreq, netfs_sreq_trace_split);
                                list_add_tail(&subreq->rreq_link, &sublist);
                        }
                }

                /* If we managed to use fewer subreqs, we can discard the
                 * excess.
                 */
                while ((subreq = list_first_entry_or_null(
                                &sublist, struct netfs_io_subrequest, rreq_link))) {
                        trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
                        list_del(&subreq->rreq_link);
                        netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
                }

        } while (!list_empty(&queue));

        return;

        /* If we hit ENOMEM, fail all remaining subrequests */
abandon:
        list_splice_init(&sublist, &queue);
        list_for_each_entry(subreq, &queue, rreq_link) {
                if (!subreq->error)
                        subreq->error = -ENOMEM;
                __clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
                __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
                __clear_bit(NETFS_SREQ_RETRYING, &subreq->flags);
        }
        spin_lock_bh(&rreq->lock);
        list_splice_tail_init(&queue, &rreq->subrequests);
        spin_unlock_bh(&rreq->lock);
}

/*
 * Retry reads.
 */
void netfs_retry_reads(struct netfs_io_request *rreq)
{
        trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

        atomic_inc(&rreq->nr_outstanding);

        netfs_retry_read_subrequests(rreq);

        if (atomic_dec_and_test(&rreq->nr_outstanding))
                netfs_rreq_terminated(rreq, false);
}

/*
 * Unlock any of the pages that haven't been unlocked yet due to abandoned
 * subrequests.
 */
void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq)
{
        struct folio_queue *p;

        for (p = rreq->buffer; p; p = p->next) {
                for (int slot = 0; slot < folioq_count(p); slot++) {
                        struct folio *folio = folioq_folio(p, slot);

                        if (folio && !folioq_is_marked2(p, slot)) {
                                trace_netfs_folio(folio, netfs_folio_trace_abandon);
                                folio_unlock(folio);
                        }
                }
        }
}

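The retry path in netfs_retry_read_subrequests() works by coalescing a run of contiguous, retryable subrequests into one span and then re-slicing that span into parts no larger than the (possibly renegotiated) maximum request size before reissuing each part. The standalone sketch below illustrates only that coalesce-and-reslice shape; the sketch_subreq type, field names and max_part value are invented for the example and are not the netfs API, and the donation, boundary-flag and cache-to-download handling of the real code is omitted.

/* Illustrative userspace sketch only: models how contiguous retryable
 * subrequests could be coalesced into one span and re-sliced against a
 * renegotiated maximum size.  Types and names here are invented for the
 * example; they are not the kernel's netfs structures.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sketch_subreq {
        unsigned long long start;       /* file offset of the subrequest */
        unsigned long long len;         /* total length requested */
        unsigned long long transferred; /* bytes already received */
        bool need_retry;                /* short read; worth retrying */
};

/* Coalesce a run of contiguous retryable subreqs beginning at index *i into
 * one [start, start + len) span, advancing *i past the subreqs consumed.
 */
static unsigned long long coalesce_span(struct sketch_subreq *s, size_t n,
                                        size_t *i, unsigned long long *start)
{
        unsigned long long len;

        *start = s[*i].start + s[*i].transferred;
        len = s[*i].len - s[*i].transferred;
        (*i)++;

        while (*i < n &&
               s[*i].need_retry &&
               s[*i].transferred == 0 &&
               s[*i].start == *start + len) {
                len += s[*i].len;
                (*i)++;
        }
        return len;
}

int main(void)
{
        /* Two short reads that abut each other, followed by one after a gap. */
        struct sketch_subreq subreqs[] = {
                { .start = 0,      .len = 65536, .transferred = 4096, .need_retry = true },
                { .start = 65536,  .len = 65536, .transferred = 0,    .need_retry = true },
                { .start = 262144, .len = 65536, .transferred = 0,    .need_retry = true },
        };
        const unsigned long long max_part = 32768;      /* stand-in for a renegotiated rsize */
        size_t i = 0, n = sizeof(subreqs) / sizeof(subreqs[0]);

        while (i < n) {
                unsigned long long start;
                unsigned long long len = coalesce_span(subreqs, n, &i, &start);

                /* Re-slice the coalesced span into reissuable parts. */
                while (len) {
                        unsigned long long part = len < max_part ? len : max_part;

                        printf("reissue %llu bytes at offset %llu\n", part, start);
                        start += part;
                        len -= part;
                }
        }
        return 0;
}

Compiled as an ordinary userspace program, the sketch prints one "reissue" line per part, loosely mirroring how each reshaped subrequest is handed back to the filesystem via ->issue_read() in netfs_reissue_read() above.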
