Linux/fs/cachefiles/io.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* kiocb-using read/write
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/falloc.h>
#include <linux/sched/mm.h>
#include <trace/events/fscache.h>
#include "internal.h"

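/*
 * State tracked for an in-flight kiocb against the backing file.  The union
 * carries the number of bytes zero-filled ahead of a read (skipped) or the
 * total length of a write (len).
 */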
struct cachefiles_kiocb {
        struct kiocb            iocb;
        refcount_t              ki_refcnt;
        loff_t                  start;
        union {
                size_t          skipped;
                size_t          len;
        };
        struct cachefiles_object *object;
        netfs_io_terminated_t   term_func;
        void                    *term_func_priv;
        bool                    was_async;
        unsigned int            inval_counter;  /* Copy of cookie->inval_counter */
        u64                     b_writing;
};

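/*
 * Drop a reference on an I/O record.  The last putter (the submitter or the
 * completion handler) releases the object and file pins and frees the record.
 */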
static inline void cachefiles_put_kiocb(struct cachefiles_kiocb *ki)
{
        if (refcount_dec_and_test(&ki->ki_refcnt)) {
                cachefiles_put_object(ki->object, cachefiles_obj_put_ioreq);
                fput(ki->iocb.ki_filp);
                kfree(ki);
        }
}

/*
 * Handle completion of a read from the cache.
 */
static void cachefiles_read_complete(struct kiocb *iocb, long ret)
{
        struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
        struct inode *inode = file_inode(ki->iocb.ki_filp);

        _enter("%ld", ret);

        if (ret < 0)
                trace_cachefiles_io_error(ki->object, inode, ret,
                                          cachefiles_trace_read_error);

        if (ki->term_func) {
                if (ret >= 0) {
                        if (ki->object->cookie->inval_counter == ki->inval_counter)
                                ret += ki->skipped;
                        else
                                ret = -ESTALE;
                }

                ki->term_func(ki->term_func_priv, ret, ki->was_async);
        }

        cachefiles_put_kiocb(ki);
}

/*
 * Initiate a read from the cache.
 */
static int cachefiles_read(struct netfs_cache_resources *cres,
                           loff_t start_pos,
                           struct iov_iter *iter,
                           enum netfs_read_from_hole read_hole,
                           netfs_io_terminated_t term_func,
                           void *term_func_priv)
{
        struct cachefiles_object *object;
        struct cachefiles_kiocb *ki;
        struct file *file;
        unsigned int old_nofs;
        ssize_t ret = -ENOBUFS;
        size_t len = iov_iter_count(iter), skipped = 0;

        if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
                goto presubmission_error;

        fscache_count_read();
        object = cachefiles_cres_object(cres);
        file = cachefiles_cres_file(cres);

        _enter("%pD,%li,%llx,%zx/%llx",
               file, file_inode(file)->i_ino, start_pos, len,
               i_size_read(file_inode(file)));

        /* If the caller asked us to seek for data before doing the read, then
         * we should do that now.  If we find a gap, we fill it with zeros.
         */
        if (read_hole != NETFS_READ_HOLE_IGNORE) {
                loff_t off = start_pos, off2;

                off2 = cachefiles_inject_read_error();
                if (off2 == 0)
                        off2 = vfs_llseek(file, off, SEEK_DATA);
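                /* vfs_llseek() returns either a file offset or a negative
                 * errno; only values in the errno range indicate an error,
                 * and -ENXIO just means there's no data here.
                 */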
                if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO && off2 != -ENXIO) {
                        skipped = 0;
                        ret = off2;
                        goto presubmission_error;
                }

                if (off2 == -ENXIO || off2 >= start_pos + len) {
                        /* The region is beyond the EOF or there's no more data
                         * in the region, so clear the rest of the buffer and
                         * return success.
                         */
                        ret = -ENODATA;
                        if (read_hole == NETFS_READ_HOLE_FAIL)
                                goto presubmission_error;

                        iov_iter_zero(len, iter);
                        skipped = len;
                        ret = 0;
                        goto presubmission_error;
                }

                skipped = off2 - off;
                iov_iter_zero(skipped, iter);
        }

        ret = -ENOMEM;
        ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
        if (!ki)
                goto presubmission_error;

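        /* One ref is consumed by the completion handler, the other is
         * dropped by this function once submission has been dealt with.
         */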
        refcount_set(&ki->ki_refcnt, 2);
        ki->iocb.ki_filp        = file;
        ki->iocb.ki_pos         = start_pos + skipped;
        ki->iocb.ki_flags       = IOCB_DIRECT;
        ki->iocb.ki_ioprio      = get_current_ioprio();
        ki->skipped             = skipped;
        ki->object              = object;
        ki->inval_counter       = cres->inval_counter;
        ki->term_func           = term_func;
        ki->term_func_priv      = term_func_priv;
        ki->was_async           = true;

        if (ki->term_func)
                ki->iocb.ki_complete = cachefiles_read_complete;

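        /* Pin the file and the object so that they outlive the I/O should it
         * complete asynchronously.
         */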
        get_file(ki->iocb.ki_filp);
        cachefiles_grab_object(object, cachefiles_obj_get_ioreq);

        trace_cachefiles_read(object, file_inode(file), ki->iocb.ki_pos, len - skipped);
        old_nofs = memalloc_nofs_save();
        ret = cachefiles_inject_read_error();
        if (ret == 0)
                ret = vfs_iocb_iter_read(file, &ki->iocb, iter);
        memalloc_nofs_restore(old_nofs);
        switch (ret) {
        case -EIOCBQUEUED:
                goto in_progress;

        case -ERESTARTSYS:
        case -ERESTARTNOINTR:
        case -ERESTARTNOHAND:
        case -ERESTART_RESTARTBLOCK:
                /* There's no easy way to restart the syscall since other AIOs
                 * may already be running. Just fail this I/O with EINTR.
                 */
                ret = -EINTR;
                fallthrough;
        default:
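                /* The I/O completed (or failed) synchronously, so invoke the
                 * completion handler ourselves with was_async cleared.
                 */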
                ki->was_async = false;
                cachefiles_read_complete(&ki->iocb, ret);
                if (ret > 0)
                        ret = 0;
                break;
        }

in_progress:
        cachefiles_put_kiocb(ki);
        _leave(" = %zd", ret);
        return ret;

presubmission_error:
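        /* The request was never submitted; report the error, or the number of
         * bytes that were zero-filled, directly to the terminal function.
         */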
        if (term_func)
                term_func(term_func_priv, ret < 0 ? ret : skipped, false);
        return ret;
}

/*
 * Query the occupancy of the cache in a region, returning where the next chunk
 * of data starts and how long it is.
 */
static int cachefiles_query_occupancy(struct netfs_cache_resources *cres,
                                      loff_t start, size_t len, size_t granularity,
                                      loff_t *_data_start, size_t *_data_len)
{
        struct cachefiles_object *object;
        struct file *file;
        loff_t off, off2;

        *_data_start = -1;
        *_data_len = 0;

        if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
                return -ENOBUFS;

        object = cachefiles_cres_object(cres);
        file = cachefiles_cres_file(cres);
        granularity = max_t(size_t, object->volume->cache->bsize, granularity);

        _enter("%pD,%li,%llx,%zx/%llx",
               file, file_inode(file)->i_ino, start, len,
               i_size_read(file_inode(file)));

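        /* Find where the data begins (SEEK_DATA) and where it runs out again
         * (SEEK_HOLE), then trim both ends to cache-block granularity.
         */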
        off = cachefiles_inject_read_error();
        if (off == 0)
                off = vfs_llseek(file, start, SEEK_DATA);
        if (off == -ENXIO)
                return -ENODATA; /* Beyond EOF */
        if (off < 0 && off >= (loff_t)-MAX_ERRNO)
                return -ENOBUFS; /* Error. */
        if (round_up(off, granularity) >= start + len)
                return -ENODATA; /* No data in range */

        off2 = cachefiles_inject_read_error();
        if (off2 == 0)
                off2 = vfs_llseek(file, off, SEEK_HOLE);
        if (off2 == -ENXIO)
                return -ENODATA; /* Beyond EOF */
        if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO)
                return -ENOBUFS; /* Error. */

        /* Round away partial blocks */
        off = round_up(off, granularity);
        off2 = round_down(off2, granularity);
        if (off2 <= off)
                return -ENODATA;

        *_data_start = off;
        if (off2 > start + len)
                *_data_len = len;
        else
                *_data_len = off2 - off;
        return 0;
}

/*
 * Handle completion of a write to the cache.
 */
static void cachefiles_write_complete(struct kiocb *iocb, long ret)
{
        struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
        struct cachefiles_object *object = ki->object;
        struct inode *inode = file_inode(ki->iocb.ki_filp);

        _enter("%ld", ret);

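        /* For an async write, drop the superblock write (freeze) protection
         * that was inherited from the submitting thread.
         */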
        if (ki->was_async)
                kiocb_end_write(iocb);

        if (ret < 0)
                trace_cachefiles_io_error(object, inode, ret,
                                          cachefiles_trace_write_error);

        atomic_long_sub(ki->b_writing, &object->volume->cache->b_writing);
        set_bit(FSCACHE_COOKIE_HAVE_DATA, &object->cookie->flags);
        if (ki->term_func)
                ki->term_func(ki->term_func_priv, ret, ki->was_async);
        cachefiles_put_kiocb(ki);
}

/*
 * Initiate a write to the cache.
 */
int __cachefiles_write(struct cachefiles_object *object,
                       struct file *file,
                       loff_t start_pos,
                       struct iov_iter *iter,
                       netfs_io_terminated_t term_func,
                       void *term_func_priv)
{
        struct cachefiles_cache *cache;
        struct cachefiles_kiocb *ki;
        unsigned int old_nofs;
        ssize_t ret;
        size_t len = iov_iter_count(iter);

        fscache_count_write();
        cache = object->volume->cache;

        _enter("%pD,%li,%llx,%zx/%llx",
               file, file_inode(file)->i_ino, start_pos, len,
               i_size_read(file_inode(file)));

        ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
        if (!ki) {
                if (term_func)
                        term_func(term_func_priv, -ENOMEM, false);
                return -ENOMEM;
        }

        refcount_set(&ki->ki_refcnt, 2);
        ki->iocb.ki_filp        = file;
        ki->iocb.ki_pos         = start_pos;
        ki->iocb.ki_flags       = IOCB_DIRECT | IOCB_WRITE;
        ki->iocb.ki_ioprio      = get_current_ioprio();
        ki->object              = object;
        ki->start               = start_pos;
        ki->len                 = len;
        ki->term_func           = term_func;
        ki->term_func_priv      = term_func_priv;
        ki->was_async           = true;
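        /* Number of cache blocks touched by the write, rounding up any
         * partial block at the tail.
         */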
        ki->b_writing           = (len + (1 << cache->bshift) - 1) >> cache->bshift;

        if (ki->term_func)
                ki->iocb.ki_complete = cachefiles_write_complete;
        atomic_long_add(ki->b_writing, &cache->b_writing);

        get_file(ki->iocb.ki_filp);
        cachefiles_grab_object(object, cachefiles_obj_get_ioreq);

        trace_cachefiles_write(object, file_inode(file), ki->iocb.ki_pos, len);
        old_nofs = memalloc_nofs_save();
        ret = cachefiles_inject_write_error();
        if (ret == 0)
                ret = vfs_iocb_iter_write(file, &ki->iocb, iter);
        memalloc_nofs_restore(old_nofs);
        switch (ret) {
        case -EIOCBQUEUED:
                goto in_progress;

        case -ERESTARTSYS:
        case -ERESTARTNOINTR:
        case -ERESTARTNOHAND:
        case -ERESTART_RESTARTBLOCK:
                /* There's no easy way to restart the syscall since other AIOs
                 * may already be running. Just fail this I/O with EINTR.
                 */
                ret = -EINTR;
                fallthrough;
        default:
                ki->was_async = false;
                cachefiles_write_complete(&ki->iocb, ret);
                if (ret > 0)
                        ret = 0;
                break;
        }

in_progress:
        cachefiles_put_kiocb(ki);
        _leave(" = %zd", ret);
        return ret;
}

static int cachefiles_write(struct netfs_cache_resources *cres,
                            loff_t start_pos,
                            struct iov_iter *iter,
                            netfs_io_terminated_t term_func,
                            void *term_func_priv)
{
        if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE)) {
                if (term_func)
                        term_func(term_func_priv, -ENOBUFS, false);
                return -ENOBUFS;
        }

        return __cachefiles_write(cachefiles_cres_object(cres),
                                  cachefiles_cres_file(cres),
                                  start_pos, iter,
                                  term_func, term_func_priv);
}

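/*
 * Decide how a read subrequest should be performed: from the cache, by
 * downloading from the server or by zero-filling (beyond the EOF), shortening
 * the subrequest to the nearest cached/uncached boundary as needed.
 */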
static inline enum netfs_io_source
cachefiles_do_prepare_read(struct netfs_cache_resources *cres,
                           loff_t start, size_t *_len, loff_t i_size,
                           unsigned long *_flags, ino_t netfs_ino)
{
        enum cachefiles_prepare_read_trace why;
        struct cachefiles_object *object = NULL;
        struct cachefiles_cache *cache;
        struct fscache_cookie *cookie = fscache_cres_cookie(cres);
        const struct cred *saved_cred;
        struct file *file = cachefiles_cres_file(cres);
        enum netfs_io_source ret = NETFS_DOWNLOAD_FROM_SERVER;
        size_t len = *_len;
        loff_t off, to;
        ino_t ino = file ? file_inode(file)->i_ino : 0;
        int rc;

        _enter("%zx @%llx/%llx", len, start, i_size);

        if (start >= i_size) {
                ret = NETFS_FILL_WITH_ZEROES;
                why = cachefiles_trace_read_after_eof;
                goto out_no_object;
        }

        if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags)) {
                __set_bit(NETFS_SREQ_COPY_TO_CACHE, _flags);
                why = cachefiles_trace_read_no_data;
                if (!test_bit(NETFS_SREQ_ONDEMAND, _flags))
                        goto out_no_object;
        }

        /* The object and its backing file may still be in the middle of
         * being created in the background.
         */
        if (!file) {
                why = cachefiles_trace_read_no_file;
                if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
                        goto out_no_object;
                file = cachefiles_cres_file(cres);
                if (!file)
                        goto out_no_object;
                ino = file_inode(file)->i_ino;
        }

        object = cachefiles_cres_object(cres);
        cache = object->volume->cache;
        cachefiles_begin_secure(cache, &saved_cred);
retry:
        off = cachefiles_inject_read_error();
        if (off == 0)
                off = vfs_llseek(file, start, SEEK_DATA);
        if (off < 0 && off >= (loff_t)-MAX_ERRNO) {
                if (off == (loff_t)-ENXIO) {
                        why = cachefiles_trace_read_seek_nxio;
                        goto download_and_store;
                }
                trace_cachefiles_io_error(object, file_inode(file), off,
                                          cachefiles_trace_seek_error);
                why = cachefiles_trace_read_seek_error;
                goto out;
        }

        if (off >= start + len) {
                why = cachefiles_trace_read_found_hole;
                goto download_and_store;
        }

        if (off > start) {
                off = round_up(off, cache->bsize);
                len = off - start;
                *_len = len;
                why = cachefiles_trace_read_found_part;
                goto download_and_store;
        }

        to = cachefiles_inject_read_error();
        if (to == 0)
                to = vfs_llseek(file, start, SEEK_HOLE);
        if (to < 0 && to >= (loff_t)-MAX_ERRNO) {
                trace_cachefiles_io_error(object, file_inode(file), to,
                                          cachefiles_trace_seek_error);
                why = cachefiles_trace_read_seek_error;
                goto out;
        }

        if (to < start + len) {
                if (start + len >= i_size)
                        to = round_up(to, cache->bsize);
                else
                        to = round_down(to, cache->bsize);
                len = to - start;
                *_len = len;
        }

        why = cachefiles_trace_read_have_data;
        ret = NETFS_READ_FROM_CACHE;
        goto out;

download_and_store:
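        /* The data must come from the server; mark the subrequest to be
         * copied into the cache on completion.  In on-demand mode, ask the
         * daemon to fetch the data and then retry the cache lookup.
         */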
        __set_bit(NETFS_SREQ_COPY_TO_CACHE, _flags);
        if (test_bit(NETFS_SREQ_ONDEMAND, _flags)) {
                rc = cachefiles_ondemand_read(object, start, len);
                if (!rc) {
                        __clear_bit(NETFS_SREQ_ONDEMAND, _flags);
                        goto retry;
                }
                ret = NETFS_INVALID_READ;
        }
out:
        cachefiles_end_secure(cache, saved_cred);
out_no_object:
        trace_cachefiles_prep_read(object, start, len, *_flags, ret, why, ino, netfs_ino);
        return ret;
}

/*
 * Prepare a read operation, shortening it to a cached/uncached
 * boundary as appropriate.
 */
static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
                                                    unsigned long long i_size)
{
        return cachefiles_do_prepare_read(&subreq->rreq->cache_resources,
                                          subreq->start, &subreq->len, i_size,
                                          &subreq->flags, subreq->rreq->inode->i_ino);
}

/*
 * Prepare an on-demand read operation, shortening it to a cached/uncached
 * boundary as appropriate.
 */
static enum netfs_io_source
cachefiles_prepare_ondemand_read(struct netfs_cache_resources *cres,
                                 loff_t start, size_t *_len, loff_t i_size,
                                 unsigned long *_flags, ino_t ino)
{
        return cachefiles_do_prepare_read(cres, start, _len, i_size, _flags, ino);
}

/*
 * Prepare for a write to occur.
 */
int __cachefiles_prepare_write(struct cachefiles_object *object,
                               struct file *file,
                               loff_t *_start, size_t *_len, size_t upper_len,
                               bool no_space_allocated_yet)
{
        struct cachefiles_cache *cache = object->volume->cache;
        loff_t start = *_start, pos;
        size_t len = *_len;
        int ret;

        /* Round to DIO size */
        start = round_down(*_start, PAGE_SIZE);
        if (start != *_start || *_len > upper_len) {
                /* Probably asked to cache a streaming write that was written
                 * into the pagecache while the cookie was temporarily out of
                 * service due to culling.
                 */
                fscache_count_dio_misfit();
                return -ENOBUFS;
        }

        *_len = round_up(len, PAGE_SIZE);

        /* We need to work out whether there's sufficient disk space to perform
         * the write - but we can skip that check if we have space already
         * allocated.
         */
        if (no_space_allocated_yet)
                goto check_space;

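        /* See if the region is already allocated in the backing file: if
         * SEEK_DATA finds nothing before the end of the region, no space has
         * been allocated for it yet.
         */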
        pos = cachefiles_inject_read_error();
        if (pos == 0)
                pos = vfs_llseek(file, start, SEEK_DATA);
        if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
                if (pos == -ENXIO)
                        goto check_space; /* Unallocated tail */
                trace_cachefiles_io_error(object, file_inode(file), pos,
                                          cachefiles_trace_seek_error);
                return pos;
        }
        if ((u64)pos >= (u64)start + *_len)
                goto check_space; /* Unallocated region */

        /* We have a block that's at least partially filled - if we're low on
         * space, we need to see if it's fully allocated.  If it's not, we may
         * want to cull it.
         */
        if (cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
                                 cachefiles_has_space_check) == 0)
                return 0; /* Enough space to simply overwrite the whole block */

        pos = cachefiles_inject_read_error();
        if (pos == 0)
                pos = vfs_llseek(file, start, SEEK_HOLE);
        if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
                trace_cachefiles_io_error(object, file_inode(file), pos,
                                          cachefiles_trace_seek_error);
                return pos;
        }
        if ((u64)pos >= (u64)start + *_len)
                return 0; /* Fully allocated */

        /* Partially allocated, but insufficient space: cull. */
        fscache_count_no_write_space();
        ret = cachefiles_inject_remove_error();
        if (ret == 0)
                ret = vfs_fallocate(file, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                    start, *_len);
        if (ret < 0) {
                trace_cachefiles_io_error(object, file_inode(file), ret,
                                          cachefiles_trace_fallocate_error);
                cachefiles_io_error_obj(object,
                                        "CacheFiles: fallocate failed (%d)\n", ret);
                ret = -EIO;
        }

        return ret;

check_space:
        return cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
                                    cachefiles_has_space_for_write);
}

static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
                                    loff_t *_start, size_t *_len, size_t upper_len,
                                    loff_t i_size, bool no_space_allocated_yet)
{
        struct cachefiles_object *object = cachefiles_cres_object(cres);
        struct cachefiles_cache *cache = object->volume->cache;
        const struct cred *saved_cred;
        int ret;

        if (!cachefiles_cres_file(cres)) {
                if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
                        return -ENOBUFS;
                if (!cachefiles_cres_file(cres))
                        return -ENOBUFS;
        }

        cachefiles_begin_secure(cache, &saved_cred);
        ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres),
                                         _start, _len, upper_len,
                                         no_space_allocated_yet);
        cachefiles_end_secure(cache, saved_cred);
        return ret;
}

static void cachefiles_prepare_write_subreq(struct netfs_io_subrequest *subreq)
{
        struct netfs_io_request *wreq = subreq->rreq;
        struct netfs_cache_resources *cres = &wreq->cache_resources;

        _enter("W=%x[%x] %llx", wreq->debug_id, subreq->debug_index, subreq->start);

        subreq->max_len = MAX_RW_COUNT;
        subreq->max_nr_segs = BIO_MAX_VECS;

        if (!cachefiles_cres_file(cres)) {
                if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
                        return netfs_prepare_write_failed(subreq);
                if (!cachefiles_cres_file(cres))
                        return netfs_prepare_write_failed(subreq);
        }
}

static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
{
        struct netfs_io_request *wreq = subreq->rreq;
        struct netfs_cache_resources *cres = &wreq->cache_resources;
        struct cachefiles_object *object = cachefiles_cres_object(cres);
        struct cachefiles_cache *cache = object->volume->cache;
        const struct cred *saved_cred;
        size_t off, pre, post, len = subreq->len;
        loff_t start = subreq->start;
        int ret;

        _enter("W=%x[%x] %llx-%llx",
               wreq->debug_id, subreq->debug_index, start, start + len - 1);

        /* We need to start on the cache granularity boundary */
        off = start & (CACHEFILES_DIO_BLOCK_SIZE - 1);
        if (off) {
                pre = CACHEFILES_DIO_BLOCK_SIZE - off;
                if (pre >= len) {
                        netfs_write_subrequest_terminated(subreq, len, false);
                        return;
                }
                subreq->transferred += pre;
                start += pre;
                len -= pre;
                iov_iter_advance(&subreq->io_iter, pre);
        }

        /* We also need to end on the cache granularity boundary */
        post = len & (CACHEFILES_DIO_BLOCK_SIZE - 1);
        if (post) {
                len -= post;
                if (len == 0) {
                        netfs_write_subrequest_terminated(subreq, post, false);
                        return;
                }
                iov_iter_truncate(&subreq->io_iter, len);
        }

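        /* Check that the cache has enough space for the (now DIO-aligned)
         * write before issuing it.
         */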
        cachefiles_begin_secure(cache, &saved_cred);
        ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres),
                                         &start, &len, len, true);
        cachefiles_end_secure(cache, saved_cred);
        if (ret < 0) {
                netfs_write_subrequest_terminated(subreq, ret, false);
                return;
        }

        cachefiles_write(&subreq->rreq->cache_resources,
                         subreq->start, &subreq->io_iter,
                         netfs_write_subrequest_terminated, subreq);
}

/*
 * Clean up an operation.
 */
static void cachefiles_end_operation(struct netfs_cache_resources *cres)
{
        struct file *file = cachefiles_cres_file(cres);

        if (file)
                fput(file);
        fscache_end_cookie_access(fscache_cres_cookie(cres), fscache_access_io_end);
}

static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
        .end_operation          = cachefiles_end_operation,
        .read                   = cachefiles_read,
        .write                  = cachefiles_write,
        .issue_write            = cachefiles_issue_write,
        .prepare_read           = cachefiles_prepare_read,
        .prepare_write          = cachefiles_prepare_write,
        .prepare_write_subreq   = cachefiles_prepare_write_subreq,
        .prepare_ondemand_read  = cachefiles_prepare_ondemand_read,
        .query_occupancy        = cachefiles_query_occupancy,
};

/*
 * Open the cache file when beginning a cache operation.
 */
bool cachefiles_begin_operation(struct netfs_cache_resources *cres,
                                enum fscache_want_state want_state)
{
        struct cachefiles_object *object = cachefiles_cres_object(cres);

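        /* On first use, install the cache ops and take a ref on the backing
         * file (if it exists yet) under the object lock.
         */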
        if (!cachefiles_cres_file(cres)) {
                cres->ops = &cachefiles_netfs_cache_ops;
                if (object->file) {
                        spin_lock(&object->lock);
                        if (!cres->cache_priv2 && object->file)
                                cres->cache_priv2 = get_file(object->file);
                        spin_unlock(&object->lock);
                }
        }

        if (!cachefiles_cres_file(cres) && want_state != FSCACHE_WANT_PARAMS) {
                pr_err("failed to get cres->file\n");
                return false;
        }

        return true;
}