// SPDX-License-Identifier: GPL-2.0-or-later
/* NFS filesystem cache interface
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
#include <linux/in6.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/iversion.h>
#include <linux/xarray.h>
#include <linux/fscache.h>
#include <linux/netfs.h>

#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "nfstrace.h"

#define NFS_MAX_KEY_LEN 1000

static bool nfs_append_int(char *key, int *_len, unsigned long long x)
{
	if (*_len > NFS_MAX_KEY_LEN)
		return false;
	if (x == 0)
		key[(*_len)++] = ',';
	else
		*_len += sprintf(key + *_len, ",%llx", x);
	return true;
}

/*
 * Get the per-client index cookie for an NFS client if the appropriate mount
 * flag was set
 * - We always try to get an index cookie for the client, but get filehandle
 *   cookies on a per-superblock basis, depending on the mount flags
 */
static bool nfs_fscache_get_client_key(struct nfs_client *clp,
				       char *key, int *_len)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
	const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;

	*_len += snprintf(key + *_len, NFS_MAX_KEY_LEN - *_len,
			  ",%u.%u,%x",
			  clp->rpc_ops->version,
			  clp->cl_minorversion,
			  clp->cl_addr.ss_family);

	switch (clp->cl_addr.ss_family) {
	case AF_INET:
		if (!nfs_append_int(key, _len, sin->sin_port) ||
		    !nfs_append_int(key, _len, sin->sin_addr.s_addr))
			return false;
		return true;

	case AF_INET6:
		if (!nfs_append_int(key, _len, sin6->sin6_port) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[0]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[1]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[2]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[3]))
			return false;
		return true;

	default:
		printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
		       clp->cl_addr.ss_family);
		return false;
	}
}
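/*
 * For illustration (example values, not upstream text): given the formats
 * used above, an NFSv4.1 client over IPv4 (AF_INET == 2) appends a fragment
 * of the form ",4.1,2,<port-hex>,<addr-hex>", where the port and address are
 * the raw (network byte order) sockaddr fields printed as hex by
 * nfs_append_int().  Note that nfs_append_int() encodes a zero value as a
 * bare ',' rather than ",0", so the field count stays fixed.
 */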
/*
 * Get the cache cookie for an NFS superblock.
 *
 * The default uniquifier is just an empty string, but it may be overridden
 * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
 * superblock across an automount point of some nature.
 */
int nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
{
	struct fscache_volume *vcookie;
	struct nfs_server *nfss = NFS_SB(sb);
	unsigned int len = 3;
	char *key;

	if (uniq) {
		nfss->fscache_uniq = kmemdup_nul(uniq, ulen, GFP_KERNEL);
		if (!nfss->fscache_uniq)
			return -ENOMEM;
	}

	key = kmalloc(NFS_MAX_KEY_LEN + 24, GFP_KERNEL);
	if (!key)
		return -ENOMEM;

	memcpy(key, "nfs", 3);
	if (!nfs_fscache_get_client_key(nfss->nfs_client, key, &len) ||
	    !nfs_append_int(key, &len, nfss->fsid.major) ||
	    !nfs_append_int(key, &len, nfss->fsid.minor) ||
	    !nfs_append_int(key, &len, sb->s_flags & NFS_SB_MASK) ||
	    !nfs_append_int(key, &len, nfss->flags) ||
	    !nfs_append_int(key, &len, nfss->rsize) ||
	    !nfs_append_int(key, &len, nfss->wsize) ||
	    !nfs_append_int(key, &len, nfss->acregmin) ||
	    !nfs_append_int(key, &len, nfss->acregmax) ||
	    !nfs_append_int(key, &len, nfss->acdirmin) ||
	    !nfs_append_int(key, &len, nfss->acdirmax) ||
	    !nfs_append_int(key, &len, nfss->client->cl_auth->au_flavor))
		goto out;

	if (ulen > 0) {
		if (ulen > NFS_MAX_KEY_LEN - len)
			goto out;
		key[len++] = ',';
		memcpy(key + len, uniq, ulen);
		len += ulen;
	}
	key[len] = 0;

	/* create a cache index for looking up filehandles */
	vcookie = fscache_acquire_volume(key,
					 NULL, /* preferred_cache */
					 NULL, 0 /* coherency_data */);
	if (IS_ERR(vcookie)) {
		if (vcookie != ERR_PTR(-EBUSY)) {
			kfree(key);
			return PTR_ERR(vcookie);
		}
		pr_err("NFS: Cache volume key already in use (%s)\n", key);
		vcookie = NULL;
	}
	nfss->fscache = vcookie;

out:
	kfree(key);
	return 0;
}
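/*
 * For illustration: the finished volume key is "nfs" followed by the client
 * fragment, the mount parameters and the optional uniquifier, all as
 * comma-separated hex fields, i.e. something of the shape
 *
 *	nfs,4.1,2,<port>,<addr>,<fsid.major>,<fsid.minor>,<sb-flags>,
 *	<nfs-flags>,<rsize>,<wsize>,<acregmin>,<acregmax>,<acdirmin>,
 *	<acdirmax>,<auth-flavor>[,<uniquifier>]
 *
 * Two mounts only end up sharing a cache volume when every one of these
 * parameters matches.
 */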
/*
 * release a per-superblock cookie
 */
void nfs_fscache_release_super_cookie(struct super_block *sb)
{
	struct nfs_server *nfss = NFS_SB(sb);

	fscache_relinquish_volume(nfss->fscache, NULL, false);
	nfss->fscache = NULL;
	kfree(nfss->fscache_uniq);
}

/*
 * Initialise the per-inode cache cookie pointer for an NFS inode.
 */
void nfs_fscache_init_inode(struct inode *inode)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_server *nfss = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);

	netfs_inode(inode)->cache = NULL;
	if (!(nfss->fscache && S_ISREG(inode->i_mode)))
		return;

	nfs_fscache_update_auxdata(&auxdata, inode);

	netfs_inode(inode)->cache = fscache_acquire_cookie(
					       nfss->fscache,
					       0,
					       nfsi->fh.data, /* index_key */
					       nfsi->fh.size,
					       &auxdata, /* aux_data */
					       sizeof(auxdata),
					       i_size_read(inode));

	if (netfs_inode(inode)->cache)
		mapping_set_release_always(inode->i_mapping);
}

/*
 * Release a per-inode cookie.
 */
void nfs_fscache_clear_inode(struct inode *inode)
{
	fscache_relinquish_cookie(netfs_i_cookie(netfs_inode(inode)), false);
	netfs_inode(inode)->cache = NULL;
}

/*
 * Enable or disable caching for a file that is being opened as appropriate.
 * The cookie is allocated when the inode is initialised, but is not enabled
 * at that time.  Enablement is deferred to file-open time to avoid stat() and
 * access() thrashing the cache.
 *
 * For now, with NFS, only regular files that are open read-only will be able
 * to use the cache.
 *
 * We enable the cache for an inode if we open it read-only and it isn't
 * currently open for writing.  We disable the cache if the inode is open
 * write-only.
 *
 * The caller uses the file struct to pin i_writecount on the inode before
 * calling us when a file is opened for writing, so we can make use of that.
 *
 * Note that this may be invoked multiple times in parallel by parallel
 * nfs_open() functions.
 */
void nfs_fscache_open_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
	bool open_for_write = inode_is_open_for_write(inode);

	if (!fscache_cookie_valid(cookie))
		return;

	fscache_use_cookie(cookie, open_for_write);
	if (open_for_write) {
		nfs_fscache_update_auxdata(&auxdata, inode);
		fscache_invalidate(cookie, &auxdata, i_size_read(inode),
				   FSCACHE_INVAL_DIO_WRITE);
	}
}
EXPORT_SYMBOL_GPL(nfs_fscache_open_file);

void nfs_fscache_release_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
	loff_t i_size = i_size_read(inode);

	nfs_fscache_update_auxdata(&auxdata, inode);
	fscache_unuse_cookie(cookie, &auxdata, &i_size);
}

int nfs_netfs_read_folio(struct file *file, struct folio *folio)
{
	if (!netfs_inode(folio_inode(folio))->cache)
		return -ENOBUFS;

	return netfs_read_folio(file, folio);
}

int nfs_netfs_readahead(struct readahead_control *ractl)
{
	struct inode *inode = ractl->mapping->host;

	if (!netfs_inode(inode)->cache)
		return -ENOBUFS;

	netfs_readahead(ractl);
	return 0;
}
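/*
 * For illustration: both helpers above return -ENOBUFS when the inode has no
 * cache cookie attached, which lets the caller try the netfs path first and
 * fall back to an ordinary uncached NFS read.  A simplified, hypothetical
 * caller might look like:
 *
 *	ret = nfs_netfs_read_folio(file, folio);
 *	if (ret != -ENOBUFS)
 *		return ret;
 *	... issue the regular uncached read ...
 */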
static atomic_t nfs_netfs_debug_id;
static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
	rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
	/* [DEPRECATED] Use PG_private_2 to mark folio being written to the
	 * cache.
	 */
	__set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);

	return 0;
}

static void nfs_netfs_free_request(struct netfs_io_request *rreq)
{
	put_nfs_open_context(rreq->netfs_priv);
}

static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
{
	struct nfs_netfs_io_data *netfs;

	netfs = kzalloc(sizeof(*netfs), GFP_KERNEL_ACCOUNT);
	if (!netfs)
		return NULL;
	netfs->sreq = sreq;
	refcount_set(&netfs->refcount, 1);
	return netfs;
}

static bool nfs_netfs_clamp_length(struct netfs_io_subrequest *sreq)
{
	size_t rsize = NFS_SB(sreq->rreq->inode->i_sb)->rsize;

	sreq->len = min(sreq->len, rsize);
	return true;
}

static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
{
	struct nfs_netfs_io_data *netfs;
	struct nfs_pageio_descriptor pgio;
	struct inode *inode = sreq->rreq->inode;
	struct nfs_open_context *ctx = sreq->rreq->netfs_priv;
	struct page *page;
	unsigned long idx;
	int err;
	pgoff_t start = (sreq->start + sreq->transferred) >> PAGE_SHIFT;
	pgoff_t last = ((sreq->start + sreq->len -
			 sreq->transferred - 1) >> PAGE_SHIFT);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	netfs = nfs_netfs_alloc(sreq);
	if (!netfs)
		return netfs_subreq_terminated(sreq, -ENOMEM, false);

	pgio.pg_netfs = netfs; /* used in completion */

	xa_for_each_range(&sreq->rreq->mapping->i_pages, idx, page, start, last) {
		/* nfs_read_add_folio() may schedule() due to pNFS layout and other RPCs */
		err = nfs_read_add_folio(&pgio, ctx, page_folio(page));
		if (err < 0) {
			netfs->error = err;
			goto out;
		}
	}
out:
	nfs_pageio_complete_read(&pgio);
	nfs_netfs_put(netfs);
}

void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr)
{
	struct nfs_netfs_io_data *netfs = hdr->netfs;

	if (!netfs)
		return;

	nfs_netfs_get(netfs);
}

int nfs_netfs_folio_unlock(struct folio *folio)
{
	struct inode *inode = folio->mapping->host;

	/*
	 * If fscache is enabled, netfs will unlock pages.
	 */
	if (netfs_inode(inode)->cache)
		return 0;

	return 1;
}

void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_netfs_io_data *netfs = hdr->netfs;
	struct netfs_io_subrequest *sreq;

	if (!netfs)
		return;

	sreq = netfs->sreq;
	if (test_bit(NFS_IOHDR_EOF, &hdr->flags) &&
	    sreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags);

	if (hdr->error)
		netfs->error = hdr->error;
	else
		atomic64_add(hdr->res.count, &netfs->transferred);

	nfs_netfs_put(netfs);
	hdr->netfs = NULL;
}

const struct netfs_request_ops nfs_netfs_ops = {
	.init_request		= nfs_netfs_init_request,
	.free_request		= nfs_netfs_free_request,
	.issue_read		= nfs_netfs_issue_read,
	.clamp_length		= nfs_netfs_clamp_length
};
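/*
 * For illustration: the nfs_netfs_io_data refcount ties the lifetime of a
 * netfs subrequest to the NFS RPCs issued on its behalf.  nfs_netfs_alloc()
 * starts the count at 1 (held by nfs_netfs_issue_read()),
 * nfs_netfs_initiate_read() takes a reference per pgio header, and
 * nfs_netfs_read_completion() drops it again, so the subrequest is only
 * terminated once every RPC has completed and issue_read has dropped its own
 * reference (the final put is expected to live in nfs_netfs_put() in
 * fscache.h).
 */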