/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Network filesystem support services.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See:
 *
 *	Documentation/filesystems/netfs_library.rst
 *
 * for a description of the network filesystem interface declared here.
 */

#ifndef _LINUX_NETFS_H
#define _LINUX_NETFS_H

#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

enum netfs_sreq_ref_trace;
typedef struct mempool_s mempool_t;

/**
 * folio_start_private_2 - Start an fscache write on a folio.  [DEPRECATED]
 * @folio: The folio.
 *
 * Call this function before writing a folio to a local cache.  Starting a
 * second write before the first one finishes is not allowed.
 *
 * Note that this should no longer be used.
 */
static inline void folio_start_private_2(struct folio *folio)
{
	VM_BUG_ON_FOLIO(folio_test_private_2(folio), folio);
	folio_get(folio);
	folio_set_private_2(folio);
}
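
/*
 * Illustrative sketch (not part of this header): the PG_private_2 write
 * marker set above is normally paired with folio_end_private_2() from the
 * cache's completion path and folio_wait_private_2() from anyone who must
 * wait for the cache write to finish, e.g.:
 *
 *	folio_start_private_2(folio);	// before writing to the cache
 *	...				// asynchronous write to the local cache
 *	folio_end_private_2(folio);	// wakes waiters, drops the folio ref
 *
 * A truncation or invalidation path would call folio_wait_private_2(folio)
 * (or the _killable variant) before proceeding.
 */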

enum netfs_io_source {
	NETFS_SOURCE_UNKNOWN,
	NETFS_FILL_WITH_ZEROES,
	NETFS_DOWNLOAD_FROM_SERVER,
	NETFS_READ_FROM_CACHE,
	NETFS_INVALID_READ,
	NETFS_UPLOAD_TO_SERVER,
	NETFS_WRITE_TO_CACHE,
	NETFS_INVALID_WRITE,
} __mode(byte);

typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
				      bool was_async);

/*
 * Per-inode context.  This wraps the VFS inode.
 */
struct netfs_inode {
	struct inode		inode;		/* The VFS inode */
	const struct netfs_request_ops *ops;
#if IS_ENABLED(CONFIG_FSCACHE)
	struct fscache_cookie	*cache;
#endif
	struct mutex		wb_lock;	/* Writeback serialisation */
	loff_t			remote_i_size;	/* Size of the remote file */
	loff_t			zero_point;	/* Size after which we assume there's no data
						 * on the server */
	atomic_t		io_count;	/* Number of outstanding reqs */
	unsigned long		flags;
#define NETFS_ICTX_ODIRECT	0		/* The file has DIO in progress */
#define NETFS_ICTX_UNBUFFERED	1		/* I/O should not use the pagecache */
#define NETFS_ICTX_WRITETHROUGH	2		/* Write-through caching */
#define NETFS_ICTX_MODIFIED_ATTR 3		/* Indicate change in mtime/ctime */
};
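
/*
 * Illustrative sketch (hypothetical "myfs" names): a network filesystem
 * embeds struct netfs_inode at the start of its own inode so that the
 * netfs_inode() helper further down can recover it with container_of():
 *
 *	struct myfs_inode {
 *		struct netfs_inode netfs;	// must come first
 *		// ... filesystem-private fields ...
 *	};
 *
 *	static inline struct myfs_inode *MYFS_I(struct inode *inode)
 *	{
 *		return container_of(inode, struct myfs_inode, netfs.inode);
 *	}
 */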

/*
 * A netfs group - for instance a ceph snap.  This is marked on dirty pages and
 * pages marked with a group must be flushed before they can be written under
 * the domain of another group.
 */
struct netfs_group {
	refcount_t		ref;
	void (*free)(struct netfs_group *netfs_group);
};

/*
 * Information about a dirty page (attached only if necessary).
 * folio->private
 */
struct netfs_folio {
	struct netfs_group	*netfs_group;	/* Filesystem's grouping */
	unsigned int		dirty_offset;	/* Write-dirty data in this page */
	unsigned int		dirty_len;
};
#define NETFS_FOLIO_INFO	0x1UL	/* OR'd with folio->private. */
#define NETFS_FOLIO_COPY_TO_CACHE ((struct netfs_group *)0x356UL) /* Write to the cache only */

static inline bool netfs_is_folio_info(const void *priv)
{
	return (unsigned long)priv & NETFS_FOLIO_INFO;
}

static inline struct netfs_folio *__netfs_folio_info(const void *priv)
{
	if (netfs_is_folio_info(priv))
		return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO);
	return NULL;
}

static inline struct netfs_folio *netfs_folio_info(struct folio *folio)
{
	return __netfs_folio_info(folio_get_private(folio));
}

static inline struct netfs_group *netfs_folio_group(struct folio *folio)
{
	struct netfs_folio *finfo;
	void *priv = folio_get_private(folio);

	finfo = netfs_folio_info(folio);
	if (finfo)
		return finfo->netfs_group;
	return priv;
}

/*
 * Stream of I/O subrequests going to a particular destination, such as the
 * server or the local cache.  This is mainly intended for writing where we may
 * have to write to multiple destinations concurrently.
 */
struct netfs_io_stream {
	/* Submission tracking */
	struct netfs_io_subrequest *construct;	/* Op being constructed */
	size_t			sreq_max_len;	/* Maximum size of a subrequest */
	unsigned int		sreq_max_segs;	/* 0 or max number of segments in an iterator */
	unsigned int		submit_off;	/* Folio offset we're submitting from */
	unsigned int		submit_len;	/* Amount of data left to submit */
	unsigned int		submit_extendable_to; /* Amount I/O can be rounded up to */
	void (*prepare_write)(struct netfs_io_subrequest *subreq);
	void (*issue_write)(struct netfs_io_subrequest *subreq);
	/* Collection tracking */
	struct list_head	subrequests;	/* Contributory I/O operations */
	struct netfs_io_subrequest *front;	/* Op being collected */
	unsigned long long	collected_to;	/* Position we've collected results to */
	size_t			transferred;	/* The amount transferred from this stream */
	enum netfs_io_source	source;		/* Where to read from/write to */
	unsigned short		error;		/* Aggregate error for the stream */
	unsigned char		stream_nr;	/* Index of stream in parent table */
	bool			avail;		/* T if stream is available */
	bool			active;		/* T if stream is active */
	bool			need_retry;	/* T if this stream needs retrying */
	bool			failed;		/* T if this stream failed */
};

/*
 * Resources required to do operations on a cache.
 */
struct netfs_cache_resources {
	const struct netfs_cache_ops	*ops;
	void				*cache_priv;
	void				*cache_priv2;
	unsigned int			debug_id;	/* Cookie debug ID */
	unsigned int			inval_counter;	/* object->inval_counter at begin_op */
};
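
/*
 * Illustrative sketch (hypothetical names): cache reads and writes issued
 * through netfs_cache_resources complete asynchronously via a
 * netfs_io_terminated_t callback, e.g.:
 *
 *	static void myfs_cache_read_done(void *priv,
 *					 ssize_t transferred_or_error,
 *					 bool was_async)
 *	{
 *		// negative value = error, otherwise bytes transferred
 *	}
 *
 *	cres->ops->read(cres, pos, iter, NETFS_READ_HOLE_IGNORE,
 *			myfs_cache_read_done, my_priv);
 */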

/*
 * Descriptor for a single component subrequest.  Each operation represents an
 * individual read/write from/to a server, a cache, a journal, etc..
 *
 * The buffer iterator is persistent for the life of the subrequest struct and
 * the pages it points to can be relied on to exist for the duration.
 */
struct netfs_io_subrequest {
	struct netfs_io_request *rreq;		/* Supervising I/O request */
	struct work_struct	work;
	struct list_head	rreq_link;	/* Link in rreq->subrequests */
	struct iov_iter		io_iter;	/* Iterator for this subrequest */
	unsigned long long	start;		/* Where to start the I/O */
	size_t			len;		/* Size of the I/O */
	size_t			transferred;	/* Amount of data transferred */
	size_t			consumed;	/* Amount of read data consumed */
	size_t			prev_donated;	/* Amount of data donated from previous subreq */
	size_t			next_donated;	/* Amount of data donated from next subreq */
	refcount_t		ref;
	short			error;		/* 0 or error that occurred */
	unsigned short		debug_index;	/* Index in list (for debugging output) */
	unsigned int		nr_segs;	/* Number of segs in io_iter */
	enum netfs_io_source	source;		/* Where to read from/write to */
	unsigned char		stream_nr;	/* I/O stream this belongs to */
	unsigned char		curr_folioq_slot; /* Folio currently being read */
	unsigned char		curr_folio_order; /* Order of folio */
	struct folio_queue	*curr_folioq;	/* Queue segment in which current folio resides */
	unsigned long		flags;
#define NETFS_SREQ_COPY_TO_CACHE	0	/* Set if should copy the data to the cache */
#define NETFS_SREQ_CLEAR_TAIL		1	/* Set if the rest of the read should be cleared */
#define NETFS_SREQ_SEEK_DATA_READ	3	/* Set if ->read() should SEEK_DATA first */
#define NETFS_SREQ_NO_PROGRESS		4	/* Set if we didn't manage to read any data */
#define NETFS_SREQ_ONDEMAND		5	/* Set if it's from on-demand read mode */
#define NETFS_SREQ_BOUNDARY		6	/* Set if ends on hard boundary (eg. ceph object) */
#define NETFS_SREQ_HIT_EOF		7	/* Set if short due to EOF */
#define NETFS_SREQ_IN_PROGRESS		8	/* Unlocked when the subrequest completes */
#define NETFS_SREQ_NEED_RETRY		9	/* Set if the filesystem requests a retry */
#define NETFS_SREQ_RETRYING		10	/* Set if we're retrying */
#define NETFS_SREQ_FAILED		11	/* Set if the subreq failed unretryably */
};

enum netfs_io_origin {
	NETFS_READAHEAD,		/* This read was triggered by readahead */
	NETFS_READPAGE,			/* This read is a synchronous read */
	NETFS_READ_GAPS,		/* This read is a synchronous read to fill gaps */
	NETFS_READ_FOR_WRITE,		/* This read is to prepare a write */
	NETFS_DIO_READ,			/* This is a direct I/O read */
	NETFS_WRITEBACK,		/* This write was triggered by writepages */
	NETFS_WRITETHROUGH,		/* This write was made by netfs_perform_write() */
	NETFS_UNBUFFERED_WRITE,		/* This is an unbuffered write */
	NETFS_DIO_WRITE,		/* This is a direct I/O write */
	NETFS_PGPRIV2_COPY_TO_CACHE,	/* [DEPRECATED] This is writing read data to the cache */
	nr__netfs_io_origin
} __mode(byte);
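
/*
 * Illustrative sketch (hypothetical "myfs" names): a filesystem's
 * ->issue_read() typically fetches subreq->len bytes from subreq->start on
 * the server, copies the reply into subreq->io_iter, sets
 * subreq->transferred, and completes the op with
 * netfs_read_subreq_terminated() (declared later in this header):
 *
 *	static void myfs_issue_read(struct netfs_io_subrequest *subreq)
 *	{
 *		int err = myfs_send_read_rpc(subreq);	// fills io_iter
 *		netfs_read_subreq_terminated(subreq, err, false);
 *	}
 */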

/*
 * Descriptor for an I/O helper request.  This is used to make multiple I/O
 * operations to a variety of data stores and then stitch the result together.
 */
struct netfs_io_request {
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
	struct inode		*inode;		/* The file being accessed */
	struct address_space	*mapping;	/* The mapping being accessed */
	struct kiocb		*iocb;		/* AIO completion vector */
	struct netfs_cache_resources cache_resources;
	struct readahead_control *ractl;	/* Readahead descriptor */
	struct list_head	proc_link;	/* Link in netfs_iorequests */
	struct list_head	subrequests;	/* Contributory I/O operations */
	struct netfs_io_stream	io_streams[2];	/* Streams of parallel I/O operations */
#define NR_IO_STREAMS 2 //wreq->nr_io_streams
	struct netfs_group	*group;		/* Writeback group being written back */
	struct folio_queue	*buffer;	/* Head of I/O buffer */
	struct folio_queue	*buffer_tail;	/* Tail of I/O buffer */
	struct iov_iter		iter;		/* Unencrypted-side iterator */
	struct iov_iter		io_iter;	/* I/O (Encrypted-side) iterator */
	void			*netfs_priv;	/* Private data for the netfs */
	void			*netfs_priv2;	/* Private data for the netfs */
	struct bio_vec		*direct_bv;	/* DIO buffer list (when handling iovec-iter) */
	unsigned int		direct_bv_count; /* Number of elements in direct_bv[] */
	unsigned int		debug_id;
	unsigned int		rsize;		/* Maximum read size (0 for none) */
	unsigned int		wsize;		/* Maximum write size (0 for none) */
	atomic_t		subreq_counter;	/* Next subreq->debug_index */
	unsigned int		nr_group_rel;	/* Number of refs to release on ->group */
	spinlock_t		lock;		/* Lock for queuing subreqs */
	atomic_t		nr_outstanding;	/* Number of ops in progress */
	unsigned long long	submitted;	/* Amount submitted for I/O so far */
	unsigned long long	len;		/* Length of the request */
	size_t			transferred;	/* Amount to be indicated as transferred */
	long			error;		/* 0 or error that occurred */
	enum netfs_io_origin	origin;		/* Origin of the request */
	bool			direct_bv_unpin; /* T if direct_bv[] must be unpinned */
	u8			buffer_head_slot; /* First slot in ->buffer */
	u8			buffer_tail_slot; /* Next slot in ->buffer_tail */
	unsigned long long	i_size;		/* Size of the file */
	unsigned long long	start;		/* Start position */
	atomic64_t		issued_to;	/* Write issuer folio cursor */
	unsigned long long	collected_to;	/* Point we've collected to */
	unsigned long long	cleaned_to;	/* Position we've cleaned folios to */
	pgoff_t			no_unlock_folio; /* Don't unlock this folio after read */
	size_t			prev_donated;	/* Fallback for subreq->prev_donated */
	refcount_t		ref;
	unsigned long		flags;
#define NETFS_RREQ_COPY_TO_CACHE	1	/* Need to write to the cache */
#define NETFS_RREQ_NO_UNLOCK_FOLIO	2	/* Don't unlock no_unlock_folio on completion */
#define NETFS_RREQ_DONT_UNLOCK_FOLIOS	3	/* Don't unlock the folios on completion */
#define NETFS_RREQ_FAILED		4	/* The request failed */
#define NETFS_RREQ_IN_PROGRESS		5	/* Unlocked when the request completes */
#define NETFS_RREQ_UPLOAD_TO_SERVER	8	/* Need to write to the server */
#define NETFS_RREQ_NONBLOCK		9	/* Don't block if possible (O_NONBLOCK) */
#define NETFS_RREQ_BLOCKED		10	/* We blocked */
#define NETFS_RREQ_PAUSE		11	/* Pause subrequest generation */
#define NETFS_RREQ_USE_IO_ITER		12	/* Use ->io_iter rather than ->i_pages */
#define NETFS_RREQ_ALL_QUEUED		13	/* All subreqs are now queued */
#define NETFS_RREQ_NEED_RETRY		14	/* Need to try retrying */
#define NETFS_RREQ_USE_PGPRIV2		31	/* [DEPRECATED] Use PG_private_2 to mark
						 * write to cache on read */
	const struct netfs_request_ops *netfs_ops;
	void (*cleanup)(struct netfs_io_request *req);
};
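
/*
 * Illustrative sketch (hypothetical "myfs" names): write subrequests are
 * completed in much the same way as reads, via
 * netfs_write_subrequest_terminated() (declared later in this header),
 * which takes the subrequest through a void * so it can double as a
 * netfs_io_terminated_t callback:
 *
 *	static void myfs_issue_write(struct netfs_io_subrequest *subreq)
 *	{
 *		ssize_t ret = myfs_send_write_rpc(subreq);
 *		netfs_write_subrequest_terminated(subreq,
 *						  ret < 0 ? ret : subreq->len,
 *						  false);
 *	}
 */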

/*
 * Operations the network filesystem can/must provide to the helpers.
 */
struct netfs_request_ops {
	mempool_t *request_pool;
	mempool_t *subrequest_pool;
	int (*init_request)(struct netfs_io_request *rreq, struct file *file);
	void (*free_request)(struct netfs_io_request *rreq);
	void (*free_subrequest)(struct netfs_io_subrequest *rreq);

	/* Read request handling */
	void (*expand_readahead)(struct netfs_io_request *rreq);
	int (*prepare_read)(struct netfs_io_subrequest *subreq);
	void (*issue_read)(struct netfs_io_subrequest *subreq);
	bool (*is_still_valid)(struct netfs_io_request *rreq);
	int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
				 struct folio **foliop, void **_fsdata);
	void (*done)(struct netfs_io_request *rreq);

	/* Modification handling */
	void (*update_i_size)(struct inode *inode, loff_t i_size);
	void (*post_modify)(struct inode *inode);

	/* Write request handling */
	void (*begin_writeback)(struct netfs_io_request *wreq);
	void (*prepare_write)(struct netfs_io_subrequest *subreq);
	void (*issue_write)(struct netfs_io_subrequest *subreq);
	void (*retry_request)(struct netfs_io_request *wreq, struct netfs_io_stream *stream);
	void (*invalidate_cache)(struct netfs_io_request *wreq);
};
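
/*
 * Illustrative sketch (hypothetical "myfs" names): a minimal read-only
 * filesystem might supply just the read-side hooks, reusing the issue_read
 * sketch above:
 *
 *	static const struct netfs_request_ops myfs_req_ops = {
 *		.init_request	= myfs_init_request,
 *		.free_request	= myfs_free_request,
 *		.issue_read	= myfs_issue_read,
 *	};
 *
 * and point netfs_inode->ops at it when calling netfs_inode_init() (see
 * further down in this header).
 */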

/*
 * How to handle reading from a hole.
 */
enum netfs_read_from_hole {
	NETFS_READ_HOLE_IGNORE,
	NETFS_READ_HOLE_CLEAR,
	NETFS_READ_HOLE_FAIL,
};

/*
 * Table of operations for access to a cache.
 */
struct netfs_cache_ops {
	/* End an operation */
	void (*end_operation)(struct netfs_cache_resources *cres);

	/* Read data from the cache */
	int (*read)(struct netfs_cache_resources *cres,
		    loff_t start_pos,
		    struct iov_iter *iter,
		    enum netfs_read_from_hole read_hole,
		    netfs_io_terminated_t term_func,
		    void *term_func_priv);

	/* Write data to the cache */
	int (*write)(struct netfs_cache_resources *cres,
		     loff_t start_pos,
		     struct iov_iter *iter,
		     netfs_io_terminated_t term_func,
		     void *term_func_priv);

	/* Write data to the cache from a netfs subrequest. */
	void (*issue_write)(struct netfs_io_subrequest *subreq);

	/* Expand readahead request */
	void (*expand_readahead)(struct netfs_cache_resources *cres,
				 unsigned long long *_start,
				 unsigned long long *_len,
				 unsigned long long i_size);

	/* Prepare a read operation, shortening it to a cached/uncached
	 * boundary as appropriate.
	 */
	enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
					     unsigned long long i_size);

	/* Prepare a write subrequest, working out if we're allowed to do it
	 * and finding out the maximum amount of data to gather before
	 * attempting to submit.  If we're not permitted to do it, the
	 * subrequest should be marked failed.
	 */
	void (*prepare_write_subreq)(struct netfs_io_subrequest *subreq);

	/* Prepare a write operation, working out what part of the write we can
	 * actually do.
	 */
	int (*prepare_write)(struct netfs_cache_resources *cres,
			     loff_t *_start, size_t *_len, size_t upper_len,
			     loff_t i_size, bool no_space_allocated_yet);

	/* Prepare an on-demand read operation, shortening it to a cached/uncached
	 * boundary as appropriate.
	 */
	enum netfs_io_source (*prepare_ondemand_read)(struct netfs_cache_resources *cres,
						      loff_t start, size_t *_len,
						      loff_t i_size,
						      unsigned long *_flags, ino_t ino);

	/* Query the occupancy of the cache in a region, returning where the
	 * next chunk of data starts and how long it is.
	 */
	int (*query_occupancy)(struct netfs_cache_resources *cres,
			       loff_t start, size_t len, size_t granularity,
			       loff_t *_data_start, size_t *_data_len);
};
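
/*
 * Illustrative sketch (hypothetical logic): a cache backend's
 * ->prepare_read() inspects the subrequest and tells netfslib where the
 * data should come from:
 *
 *	static enum netfs_io_source
 *	mycache_prepare_read(struct netfs_io_subrequest *subreq,
 *			     unsigned long long i_size)
 *	{
 *		if (subreq->start >= i_size)
 *			return NETFS_FILL_WITH_ZEROES;
 *		if (mycache_has_data(subreq->rreq, subreq->start))
 *			return NETFS_READ_FROM_CACHE;
 *		return NETFS_DOWNLOAD_FROM_SERVER;
 *	}
 */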

/* High-level read API. */
ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter);
ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);

/* High-level write API */
ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
			    struct netfs_group *netfs_group);
ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
					 struct netfs_group *netfs_group);
ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from);
ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *iter,
					   struct netfs_group *netfs_group);
ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from);

/* Address operations API */
struct readahead_control;
void netfs_readahead(struct readahead_control *);
int netfs_read_folio(struct file *, struct folio *);
int netfs_write_begin(struct netfs_inode *, struct file *,
		      struct address_space *, loff_t pos, unsigned int len,
		      struct folio **, void **fsdata);
int netfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc);
bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
bool netfs_release_folio(struct folio *folio, gfp_t gfp);

/* VMA operations API. */
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
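
/*
 * Illustrative sketch (hypothetical "myfs" name): a filesystem commonly
 * points most of its address_space_operations straight at the helpers
 * above:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.read_folio		= netfs_read_folio,
 *		.readahead		= netfs_readahead,
 *		.writepages		= netfs_writepages,
 *		.dirty_folio		= netfs_dirty_folio,
 *		.release_folio		= netfs_release_folio,
 *		.invalidate_folio	= netfs_invalidate_folio,
 *	};
 */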

/* (Sub)request management API. */
void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq,
				bool was_async);
void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
				  int error, bool was_async);
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
			  enum netfs_sreq_ref_trace what);
void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
			  bool was_async, enum netfs_sreq_ref_trace what);
ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
				struct iov_iter *new,
				iov_iter_extraction_t extraction_flags);
size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
			size_t max_size, size_t max_segs);
void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq);
void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
				       bool was_async);
void netfs_queue_write_request(struct netfs_io_subrequest *subreq);

int netfs_start_io_read(struct inode *inode);
void netfs_end_io_read(struct inode *inode);
int netfs_start_io_write(struct inode *inode);
void netfs_end_io_write(struct inode *inode);
int netfs_start_io_direct(struct inode *inode);
void netfs_end_io_direct(struct inode *inode);

/**
 * netfs_inode - Get the netfs inode context from the inode
 * @inode: The inode to query
 *
 * Get the netfs lib inode context from the network filesystem's inode.  The
 * context struct is expected to directly follow on from the VFS inode struct.
 */
static inline struct netfs_inode *netfs_inode(struct inode *inode)
{
	return container_of(inode, struct netfs_inode, inode);
}
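
/*
 * Illustrative sketch (hypothetical "myfs" name): the high-level read/write
 * API above slots straight into file_operations:
 *
 *	static const struct file_operations myfs_file_ops = {
 *		.read_iter	= netfs_file_read_iter,
 *		.write_iter	= netfs_file_write_iter,
 *		// ... llseek, mmap, open, release, etc. ...
 *	};
 */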

/**
 * netfs_inode_init - Initialise a netfslib inode context
 * @ctx: The netfs inode to initialise
 * @ops: The netfs's operations list
 * @use_zero_point: True to use the zero_point read optimisation
 *
 * Initialise the netfs library context struct.  This is expected to follow on
 * directly from the VFS inode struct.
 */
static inline void netfs_inode_init(struct netfs_inode *ctx,
				    const struct netfs_request_ops *ops,
				    bool use_zero_point)
{
	ctx->ops = ops;
	ctx->remote_i_size = i_size_read(&ctx->inode);
	ctx->zero_point = LLONG_MAX;
	ctx->flags = 0;
	atomic_set(&ctx->io_count, 0);
#if IS_ENABLED(CONFIG_FSCACHE)
	ctx->cache = NULL;
#endif
	mutex_init(&ctx->wb_lock);
	/* ->releasepage() drives zero_point */
	if (use_zero_point) {
		ctx->zero_point = ctx->remote_i_size;
		mapping_set_release_always(ctx->inode.i_mapping);
	}
}

/**
 * netfs_resize_file - Note that a file got resized
 * @ctx: The netfs inode being resized
 * @new_i_size: The new file size
 * @changed_on_server: The change was applied to the server
 *
 * Inform the netfs lib that a file got resized so that it can adjust its state.
 */
static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size,
				     bool changed_on_server)
{
	if (changed_on_server)
		ctx->remote_i_size = new_i_size;
	if (new_i_size < ctx->zero_point)
		ctx->zero_point = new_i_size;
}

/**
 * netfs_i_cookie - Get the cache cookie from the inode
 * @ctx: The netfs inode to query
 *
 * Get the caching cookie (if enabled) from the network filesystem's inode.
 */
static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx)
{
#if IS_ENABLED(CONFIG_FSCACHE)
	return ctx->cache;
#else
	return NULL;
#endif
}

/**
 * netfs_wait_for_outstanding_io - Wait for outstanding I/O to complete
 * @inode: The netfs inode to wait on
 *
 * Wait for outstanding I/O requests of any type to complete.  This is intended
 * to be called from inode eviction routines.  This makes sure that any
 * resources held by those requests are cleaned up before we let the inode get
 * cleaned up.
 */
static inline void netfs_wait_for_outstanding_io(struct inode *inode)
{
	struct netfs_inode *ictx = netfs_inode(inode);

	wait_var_event(&ictx->io_count, atomic_read(&ictx->io_count) == 0);
}

#endif /* _LINUX_NETFS_H */