// SPDX-License-Identifier: GPL-2.0-only
/* Read with PG_private_2 [DEPRECATED].
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2.  The
 * third mark in the folio queue is used to indicate that this folio needs
 * writing.
 */
void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
				      struct netfs_io_request *rreq,
				      struct folio_queue *folioq,
				      int slot)
{
	struct folio *folio = folioq_folio(folioq, slot);

	trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
	folio_start_private_2(folio);
	folioq_mark3(folioq, slot);
}

/*
 * [DEPRECATED] Cancel PG_private_2 on all marked folios in the event of an
 * unrecoverable error.
 */
static void netfs_pgpriv2_cancel(struct folio_queue *folioq)
{
	struct folio *folio;
	int slot;

	while (folioq) {
		if (!folioq->marks3) {
			folioq = folioq->next;
			continue;
		}

		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);

		trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
		folio_end_private_2(folio);
		folioq_unmark3(folioq, slot);
	}
}

/*
 * [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
 */
static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio *folio)
{
	struct netfs_io_stream *cache = &wreq->io_streams[1];
	size_t fsize = folio_size(folio), flen = fsize;
	loff_t fpos = folio_pos(folio), i_size;
	bool to_eof = false;

	_enter("");

	/* netfs_perform_write() may shift i_size around the page or from out
	 * of the page to beyond it, but cannot move i_size into or through the
	 * page since we have it locked.
	 */
	i_size = i_size_read(wreq->inode);

	if (fpos >= i_size) {
		/* mmap beyond eof. */
		_debug("beyond eof");
		folio_end_private_2(folio);
		return 0;
	}

	if (fpos + fsize > wreq->i_size)
		wreq->i_size = i_size;

	if (flen > i_size - fpos) {
		flen = i_size - fpos;
		to_eof = true;
	} else if (flen == i_size - fpos) {
		to_eof = true;
	}

	_debug("folio %zx %zx", flen, fsize);

	trace_netfs_folio(folio, netfs_folio_trace_store_copy);

	/* Attach the folio to the rolling buffer. */
	if (netfs_buffer_append_folio(wreq, folio, false) < 0)
		return -ENOMEM;

	cache->submit_extendable_to = fsize;
	cache->submit_off = 0;
	cache->submit_len = flen;

	/* Attach the folio to one or more subrequests.  For a big folio, we
	 * could end up with thousands of subrequests if the wsize is small -
	 * but we might need to wait during the creation of subrequests for
	 * network resources (eg. SMB credits).
	 */
	do {
		ssize_t part;

		wreq->io_iter.iov_offset = cache->submit_off;

		atomic64_set(&wreq->issued_to, fpos + cache->submit_off);
		cache->submit_extendable_to = fsize - cache->submit_off;
		part = netfs_advance_write(wreq, cache, fpos + cache->submit_off,
					   cache->submit_len, to_eof);
		cache->submit_off += part;
		if (part > cache->submit_len)
			cache->submit_len = 0;
		else
			cache->submit_len -= part;
	} while (cache->submit_len > 0);

	wreq->io_iter.iov_offset = 0;
	iov_iter_advance(&wreq->io_iter, fsize);
	atomic64_set(&wreq->issued_to, fpos + fsize);

	if (flen < fsize)
		netfs_issue_write(wreq, cache);

	_leave(" = 0");
	return 0;
}

/*
 * [DEPRECATED] Go through the buffer and write any folios that are marked with
 * the third mark to the cache.
 */
void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
{
	struct netfs_io_request *wreq;
	struct folio_queue *folioq;
	struct folio *folio;
	int error = 0;
	int slot = 0;

	_enter("");

	if (!fscache_resources_valid(&rreq->cache_resources))
		goto couldnt_start;

	/* Need the first folio to be able to set up the op. */
	for (folioq = rreq->buffer; folioq; folioq = folioq->next) {
		if (folioq->marks3) {
			slot = __ffs(folioq->marks3);
			break;
		}
	}
	if (!folioq)
		return;
	folio = folioq_folio(folioq, slot);

	wreq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
				      NETFS_PGPRIV2_COPY_TO_CACHE);
	if (IS_ERR(wreq)) {
		kleave(" [create %ld]", PTR_ERR(wreq));
		goto couldnt_start;
	}

	trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
	netfs_stat(&netfs_n_wh_copy_to_cache);

	for (;;) {
		error = netfs_pgpriv2_copy_folio(wreq, folio);
		if (error < 0)
			break;

		folioq_unmark3(folioq, slot);
		if (!folioq->marks3) {
			folioq = folioq->next;
			if (!folioq)
				break;
		}

		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);
	}

	netfs_issue_write(wreq, &wreq->io_streams[1]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	_leave(" = %d", error);
couldnt_start:
	netfs_pgpriv2_cancel(rreq->buffer);
}

/*
 * [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
 * copying.
 */
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
{
	struct folio_queue *folioq = wreq->buffer;
	unsigned long long collected_to = wreq->collected_to;
	unsigned int slot = wreq->buffer_head_slot;
	bool made_progress = false;

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = netfs_delete_buffer_head(wreq);
		slot = 0;
	}

	for (;;) {
		struct folio *folio;
		unsigned long long fpos, fend;
		size_t fsize, flen;

		folio = folioq_folio(folioq, slot);
		if (WARN_ONCE(!folio_test_private_2(folio),
			      "R=%08x: folio %lx is not marked private_2\n",
			      wreq->debug_id, folio->index))
			trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);

		fpos = folio_pos(folio);
		fsize = folio_size(folio);
		flen = fsize;

		fend = min_t(unsigned long long, fpos + flen, wreq->i_size);

		trace_netfs_collect_folio(wreq, folio, fend, collected_to);

		/* Unlock any folio we've transferred all of. */
		if (collected_to < fend)
			break;

		trace_netfs_folio(folio, netfs_folio_trace_end_copy);
		folio_end_private_2(folio);
		wreq->cleaned_to = fpos + fsize;
		made_progress = true;

		/* Clean up the head folioq.  If we clear an entire folioq, then
		 * we can get rid of it provided it's not also the tail folioq
		 * being filled by the issuer.
		 */
		folioq_clear(folioq, slot);
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			if (READ_ONCE(wreq->buffer_tail) == folioq)
				break;
			folioq = netfs_delete_buffer_head(wreq);
			slot = 0;
		}

		if (fpos + fsize >= collected_to)
			break;
	}

	wreq->buffer = folioq;
	wreq->buffer_head_slot = slot;
	return made_progress;
}
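Note: the cancel and write-back loops above both walk folioq->marks3 by repeatedly taking the lowest set bit with __ffs() and clearing that slot's mark once the folio has been handled. The fragment below is a minimal, self-contained userspace sketch of that bit-walk, not part of the kernel file: __builtin_ctzl() stands in for __ffs(), folioq_unmark3() is modelled as clearing a bit, and the bitmap value is made up for the example.

#include <stdio.h>

int main(void)
{
	unsigned long marks3 = 0x29;	/* pretend slots 0, 3 and 5 are marked */

	while (marks3) {
		int slot = __builtin_ctzl(marks3);	/* lowest set bit, as __ffs() would return */

		printf("process slot %d\n", slot);	/* e.g. end PG_private_2 on that folio */
		marks3 &= ~(1UL << slot);		/* clear the mark, like folioq_unmark3() */
	}
	return 0;
}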