// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>
#include <linux/ktime.h>
#include <linux/netfs.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "metric.h"
#include "crypto.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page. This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode. In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail). The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty. (Unless a sync write is currently in
 * progress. In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.) i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER. So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_. Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and are writing the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))

static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct folio **foliop, void **_fsdata);

static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page. Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate. If we do, readjust.
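 *
 * (The readjustment happens in ceph_invalidate_folio(), which drops the
 * wrbuffer ref that was taken here.)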
 */
static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;

	if (folio_test_dirty(folio)) {
		doutc(cl, "%llx.%llx %p idx %lu -- already dirty\n",
		      ceph_vinop(inode), folio, folio->index);
		VM_BUG_ON_FOLIO(!folio_test_private(folio), folio);
		return false;
	}

	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	doutc(cl, "%llx.%llx %p idx %lu head %d/%d -> %d/%d "
	      "snapc %p seq %lld (%d snaps)\n",
	      ceph_vinop(inode), folio, folio->index,
	      ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	      ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	      snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in folio->private. Also set
	 * PagePrivate so that we get invalidate_folio callback.
	 */
	VM_WARN_ON_FOLIO(folio->private, folio);
	folio_attach_private(folio, snapc);

	return ceph_fscache_dirty_folio(mapping, folio);
}

/*
 * If we are truncating the full folio (i.e. offset == 0), adjust the
 * dirty folio counters appropriately. Only called if there is private
 * data on the folio.
 */
static void ceph_invalidate_folio(struct folio *folio, size_t offset,
				size_t length)
{
	struct inode *inode = folio->mapping->host;
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;

	if (offset != 0 || length != folio_size(folio)) {
		doutc(cl, "%llx.%llx idx %lu partial dirty page %zu~%zu\n",
		      ceph_vinop(inode), folio->index, offset, length);
		return;
	}

	WARN_ON(!folio_test_locked(folio));
	if (folio_test_private(folio)) {
		doutc(cl, "%llx.%llx idx %lu full dirty page\n",
		      ceph_vinop(inode), folio->index);

		snapc = folio_detach_private(folio);
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
		ceph_put_snap_context(snapc);
	}

	netfs_invalidate_folio(folio, offset, length);
}

static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
{
	struct inode *inode = rreq->inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_layout *lo = &ci->i_layout;
	unsigned long max_pages = inode->i_sb->s_bdi->ra_pages;
	loff_t end = rreq->start + rreq->len, new_end;
	struct ceph_netfs_request_data *priv = rreq->netfs_priv;
	unsigned long max_len;
	u32 blockoff;

	if (priv) {
		/* Readahead is disabled by posix_fadvise POSIX_FADV_RANDOM */
		if (priv->file_ra_disabled)
			max_pages = 0;
		else
			max_pages = priv->file_ra_pages;
	}

	/* Readahead is disabled */
	if (!max_pages)
		return;

	max_len = max_pages << PAGE_SHIFT;

	/*
	 * Try to expand the length forward by rounding it up to the next
	 * block, but do not exceed the file size, unless the original
	 * request already exceeds it.
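	 *
	 * e.g. with a 4M stripe_unit and enough readahead budget, a 1M
	 * request at offset 6M is rounded out to cover the enclosing
	 * 4M..8M stripe units, assuming the file is large enough.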
	 */
	new_end = umin(round_up(end, lo->stripe_unit), rreq->i_size);
	if (new_end > end && new_end <= rreq->start + max_len)
		rreq->len = new_end - rreq->start;

	/* Try to expand the start downward */
	div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
	if (rreq->len + blockoff <= max_len) {
		rreq->start -= blockoff;
		rreq->len += blockoff;
	}
}

static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
{
	struct inode *inode = subreq->rreq->inode;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 objno, objoff;
	u32 xlen;

	/* Truncate the extent at the end of the current block */
	ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len,
				      &objno, &objoff, &xlen);
	subreq->len = min(xlen, fsc->mount_options->rsize);
	return true;
}

static void finish_netfs_read(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	struct netfs_io_subrequest *subreq = req->r_priv;
	struct ceph_osd_req_op *op = &req->r_ops[0];
	int err = req->r_result;
	bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);

	ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
				 req->r_end_latency, osd_data->length, err);

	doutc(cl, "result %d subreq->len=%zu i_size=%lld\n", req->r_result,
	      subreq->len, i_size_read(req->r_inode));

	/* no object means success but no data */
	if (err == -ENOENT)
		err = 0;
	else if (err == -EBLOCKLISTED)
		fsc->blocklisted = true;

	if (err >= 0) {
		if (sparse && err > 0)
			err = ceph_sparse_ext_map_end(op);
		if (err < subreq->len &&
		    subreq->rreq->origin != NETFS_DIO_READ)
			__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
		if (IS_ENCRYPTED(inode) && err > 0) {
			err = ceph_fscrypt_decrypt_extents(inode,
					osd_data->pages, subreq->start,
					op->extent.sparse_ext,
					op->extent.sparse_ext_cnt);
			if (err > subreq->len)
				err = subreq->len;
		}
	}

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		ceph_put_page_vector(osd_data->pages,
				     calc_pages_for(osd_data->alignment,
					osd_data->length), false);
	}
	netfs_subreq_terminated(subreq, err, false);
	iput(req->r_inode);
	ceph_dec_osd_stopping_blocker(fsc->mdsc);
}

static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct inode *inode = rreq->inode;
	struct ceph_mds_reply_info_parsed *rinfo;
	struct ceph_mds_reply_info_in *iinfo;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct iov_iter iter;
	ssize_t err = 0;
	size_t len;
	int mode;

	if (rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);

	if (subreq->start >= inode->i_size)
		goto out;

	/* We need to fetch the inline data.
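	 * It lives with the inode on the MDS rather than in RADOS
	 * objects, so fetch it with a GETATTR and copy it into the
	 * pagecache by hand.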
	 */
	mode = ceph_try_to_choose_auth_mds(inode, CEPH_STAT_CAP_INLINE_DATA);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_ino1 = ci->i_vino;
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INLINE_DATA);
	req->r_num_caps = 2;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (err < 0)
		goto out;

	rinfo = &req->r_reply_info;
	iinfo = &rinfo->targeti;
	if (iinfo->inline_version == CEPH_INLINE_NONE) {
		/* The data got uninlined */
		ceph_mdsc_put_request(req);
		return false;
	}

	len = min_t(size_t, iinfo->inline_len - subreq->start, subreq->len);
	iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
	err = copy_to_iter(iinfo->inline_data + subreq->start, len, &iter);
	if (err == 0)
		err = -EFAULT;

	ceph_mdsc_put_request(req);
out:
	netfs_subreq_terminated(subreq, err, false);
	return true;
}

static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct inode *inode = rreq->inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_osd_request *req = NULL;
	struct ceph_vino vino = ceph_vino(inode);
	struct iov_iter iter;
	int err = 0;
	u64 len = subreq->len;
	bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
	u64 off = subreq->start;
	int extent_cnt;

	if (ceph_inode_is_shutdown(inode)) {
		err = -EIO;
		goto out;
	}

	if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
		return;

	ceph_fscrypt_adjust_off_and_len(inode, &off, &len);

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino,
			off, &len, 0, 1, sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ,
			CEPH_OSD_FLAG_READ, NULL, ci->i_truncate_seq,
			ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}

	if (sparse) {
		extent_cnt = __ceph_sparse_read_ext_count(inode, len);
		err = ceph_alloc_sparse_ext_map(&req->r_ops[0], extent_cnt);
		if (err)
			goto out;
	}

	doutc(cl, "%llx.%llx pos=%llu orig_len=%zu len=%llu\n",
	      ceph_vinop(inode), subreq->start, subreq->len, len);

	iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);

	/*
	 * FIXME: For now, use CEPH_OSD_DATA_TYPE_PAGES instead of _ITER for
	 * encrypted inodes. We'd need infrastructure that handles an iov_iter
	 * instead of page arrays, and we don't have that as of yet. Once the
	 * dust settles on the write helpers and encrypt/decrypt routines for
	 * netfs, we should be able to rework this.
	 */
	if (IS_ENCRYPTED(inode)) {
		struct page **pages;
		size_t page_off;

		err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off);
		if (err < 0) {
			doutc(cl, "%llx.%llx failed to allocate pages, %d\n",
			      ceph_vinop(inode), err);
			goto out;
		}

		/* should always give us a page-aligned read */
		WARN_ON_ONCE(page_off);
		len = err;
		err = 0;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false,
						 false);
	} else {
		osd_req_op_extent_osd_iter(req, 0, &iter);
	}
	if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
		err = -EIO;
		goto out;
	}
	req->r_callback = finish_netfs_read;
	req->r_priv = subreq;
	req->r_inode = inode;
	ihold(inode);

	ceph_osdc_start_request(req->r_osdc, req);
out:
	ceph_osdc_put_request(req);
	if (err)
		netfs_subreq_terminated(subreq, err, false);
	doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err);
}

static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct inode *inode = rreq->inode;
	struct ceph_client *cl = ceph_inode_to_client(inode);
	int got = 0, want = CEPH_CAP_FILE_CACHE;
	struct ceph_netfs_request_data *priv;
	int ret = 0;

	/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
	__set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);

	if (rreq->origin != NETFS_READAHEAD)
		return 0;

	priv = kzalloc(sizeof(*priv), GFP_NOFS);
	if (!priv)
		return -ENOMEM;

	if (file) {
		struct ceph_rw_context *rw_ctx;
		struct ceph_file_info *fi = file->private_data;

		priv->file_ra_pages = file->f_ra.ra_pages;
		priv->file_ra_disabled = file->f_mode & FMODE_RANDOM;

		rw_ctx = ceph_find_rw_context(fi);
		if (rw_ctx) {
			rreq->netfs_priv = priv;
			return 0;
		}
	}

	/*
	 * readahead callers do not necessarily hold Fcb caps
	 * (e.g. fadvise, madvise).
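	 * Try to take a non-blocking cap reference here; it is stored in
	 * priv->caps and dropped again in ceph_netfs_free_request().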
	 */
	ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
	if (ret < 0) {
		doutc(cl, "%llx.%llx, error getting cap\n", ceph_vinop(inode));
		goto out;
	}

	if (!(got & want)) {
		doutc(cl, "%llx.%llx, no cache cap\n", ceph_vinop(inode));
		ret = -EACCES;
		goto out;
	}
	if (ret == 0) {
		ret = -EACCES;
		goto out;
	}

	priv->caps = got;
	rreq->netfs_priv = priv;

out:
	if (ret < 0) {
		if (got)
			ceph_put_cap_refs(ceph_inode(inode), got);
		kfree(priv);
	}

	return ret;
}

static void ceph_netfs_free_request(struct netfs_io_request *rreq)
{
	struct ceph_netfs_request_data *priv = rreq->netfs_priv;

	if (!priv)
		return;

	if (priv->caps)
		ceph_put_cap_refs(ceph_inode(rreq->inode), priv->caps);
	kfree(priv);
	rreq->netfs_priv = NULL;
}

const struct netfs_request_ops ceph_netfs_ops = {
	.init_request = ceph_init_request,
	.free_request = ceph_netfs_free_request,
	.issue_read = ceph_netfs_issue_read,
	.expand_readahead = ceph_netfs_expand_readahead,
	.clamp_length = ceph_netfs_clamp_length,
	.check_write_begin = ceph_netfs_check_write_begin,
};

#ifdef CONFIG_CEPH_FSCACHE
static void ceph_set_page_fscache(struct page *page)
{
	folio_start_private_2(page_folio(page)); /* [DEPRECATED] */
}

static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
{
	struct inode *inode = priv;

	if (IS_ERR_VALUE(error) && error != -ENOBUFS)
		ceph_fscache_invalidate(inode, false);
}

static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct fscache_cookie *cookie = ceph_fscache_cookie(ci);

	fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode),
			       ceph_fscache_write_terminated, inode, true, caching);
}
#else
static inline void ceph_set_page_fscache(struct page *page)
{
}

static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
}
#endif /* CONFIG_CEPH_FSCACHE */

struct ceph_writeback_ctl
{
	loff_t i_size;
	u64 truncate_size;
	u32 truncate_seq;
	bool size_stable;
	bool head_snapc;
};

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
		   struct ceph_snap_context *page_snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		doutc(cl, " capsnap %p snapc %p has %d dirty pages\n",
		      capsnap, capsnap->context, capsnap->dirty_pages);
		if (!capsnap->dirty_pages)
			continue;

		/* get i_size, truncate_{seq,size} for page_snapc?
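		 * Once an oldest snapc has been chosen, keep scanning only
		 * to find the capsnap that matches page_snapc and take the
		 * size/truncate info from it.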
		 */
		if (snapc && capsnap->context != page_snapc)
			continue;

		if (ctl) {
			if (capsnap->writing) {
				ctl->i_size = i_size_read(inode);
				ctl->size_stable = false;
			} else {
				ctl->i_size = capsnap->size;
				ctl->size_stable = true;
			}
			ctl->truncate_size = capsnap->truncate_size;
			ctl->truncate_seq = capsnap->truncate_seq;
			ctl->head_snapc = false;
		}

		if (snapc)
			break;

		snapc = ceph_get_snap_context(capsnap->context);
		if (!page_snapc ||
		    page_snapc == snapc ||
		    page_snapc->seq > snapc->seq)
			break;
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		doutc(cl, " head snapc %p has %d dirty pages\n", snapc,
		      ci->i_wrbuffer_ref_head);
		if (ctl) {
			ctl->i_size = i_size_read(inode);
			ctl->truncate_size = ci->i_truncate_size;
			ctl->truncate_seq = ci->i_truncate_seq;
			ctl->size_stable = false;
			ctl->head_snapc = true;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}

static u64 get_writepages_data_length(struct inode *inode,
				      struct page *page, u64 start)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_cap_snap *capsnap = NULL;
	u64 end = i_size_read(inode);
	u64 ret;

	snapc = page_snap_context(ceph_fscrypt_pagecache_page(page));
	if (snapc != ci->i_head_snapc) {
		bool found = false;
		spin_lock(&ci->i_ceph_lock);
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				if (!capsnap->writing)
					end = capsnap->size;
				found = true;
				break;
			}
		}
		spin_unlock(&ci->i_ceph_lock);
		WARN_ON(!found);
	}
	if (end > ceph_fscrypt_page_offset(page) + thp_size(page))
		end = ceph_fscrypt_page_offset(page) + thp_size(page);
	ret = end > start ? end - start : 0;
	if (ret && fscrypt_is_bounce_page(page))
		ret = round_up(ret, CEPH_FSCRYPT_BLOCK_SIZE);
	return ret;
}

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, mark the mapping for error, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = page_offset(page);
	int err;
	loff_t len = thp_size(page);
	loff_t wlen;
	struct ceph_writeback_ctl ceph_wbc;
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_osd_request *req;
	bool caching = ceph_is_cache_enabled(inode);
	struct page *bounce_page = NULL;

	doutc(cl, "%llx.%llx page %p idx %lu\n", ceph_vinop(inode), page,
	      page->index);

	if (ceph_inode_is_shutdown(inode))
		return -EIO;

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (!snapc) {
		doutc(cl, "%llx.%llx page %p not dirty?\n", ceph_vinop(inode),
		      page);
		return 0;
	}
	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
	if (snapc->seq > oldest->seq) {
		doutc(cl, "%llx.%llx page %p snapc %p not writeable - noop\n",
		      ceph_vinop(inode), page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON(!(current->flags & PF_MEMALLOC));
		ceph_put_snap_context(oldest);
		redirty_page_for_writepage(wbc, page);
		return 0;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (page_off >= ceph_wbc.i_size) {
		doutc(cl, "%llx.%llx folio at %lu beyond eof %llu\n",
		      ceph_vinop(inode), folio->index, ceph_wbc.i_size);
		folio_invalidate(folio, 0, folio_size(folio));
		return 0;
	}

	if (ceph_wbc.i_size < page_off + len)
		len = ceph_wbc.i_size - page_off;

	wlen = IS_ENCRYPTED(inode) ? round_up(len, CEPH_FSCRYPT_BLOCK_SIZE) : len;
	doutc(cl, "%llx.%llx page %p index %lu on %llu~%llu snapc %p seq %lld\n",
	      ceph_vinop(inode), page, page->index, page_off, wlen, snapc,
	      snapc->seq);

	if (atomic_long_inc_return(&fsc->writeback_count) >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		fsc->write_congested = true;

	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode),
				    page_off, &wlen, 0, 1, CEPH_OSD_OP_WRITE,
				    CEPH_OSD_FLAG_WRITE, snapc,
				    ceph_wbc.truncate_seq,
				    ceph_wbc.truncate_size, true);
	if (IS_ERR(req)) {
		redirty_page_for_writepage(wbc, page);
		return PTR_ERR(req);
	}

	if (wlen < len)
		len = wlen;

	set_page_writeback(page);
	if (caching)
		ceph_set_page_fscache(page);
	ceph_fscache_write_to_cache(inode, page_off, len, caching);

	if (IS_ENCRYPTED(inode)) {
		bounce_page = fscrypt_encrypt_pagecache_blocks(page,
						    CEPH_FSCRYPT_BLOCK_SIZE, 0,
						    GFP_NOFS);
		if (IS_ERR(bounce_page)) {
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			ceph_osdc_put_request(req);
			return PTR_ERR(bounce_page);
		}
	}

	/* it may be a short write due to an object boundary */
	WARN_ON_ONCE(len > thp_size(page));
	osd_req_op_extent_osd_data_pages(req, 0,
			bounce_page ? &bounce_page : &page, wlen, 0,
			false, false);
	doutc(cl, "%llx.%llx %llu~%llu (%llu bytes, %sencrypted)\n",
	      ceph_vinop(inode), page_off, len, wlen,
	      IS_ENCRYPTED(inode) ? "" : "not ");
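
	/* Submit the write and wait synchronously for the OSD's reply. */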
"" : "not "); 749 750 req->r_mtime = inode_get_mtime(inode); 751 ceph_osdc_start_request(osdc, req); 752 err = ceph_osdc_wait_request(osdc, req); 753 754 ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency, 755 req->r_end_latency, len, err); 756 fscrypt_free_bounce_page(bounce_page); 757 ceph_osdc_put_request(req); 758 if (err == 0) 759 err = len; 760 761 if (err < 0) { 762 struct writeback_control tmp_wbc; 763 if (!wbc) 764 wbc = &tmp_wbc; 765 if (err == -ERESTARTSYS) { 766 /* killed by SIGKILL */ 767 doutc(cl, "%llx.%llx interrupted page %p\n", 768 ceph_vinop(inode), page); 769 redirty_page_for_writepage(wbc, page); 770 end_page_writeback(page); 771 return err; 772 } 773 if (err == -EBLOCKLISTED) 774 fsc->blocklisted = true; 775 doutc(cl, "%llx.%llx setting page/mapping error %d %p\n", 776 ceph_vinop(inode), err, page); 777 mapping_set_error(&inode->i_data, err); 778 wbc->pages_skipped++; 779 } else { 780 doutc(cl, "%llx.%llx cleaned page %p\n", 781 ceph_vinop(inode), page); 782 err = 0; /* vfs expects us to return 0 */ 783 } 784 oldest = detach_page_private(page); 785 WARN_ON_ONCE(oldest != snapc); 786 end_page_writeback(page); 787 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 788 ceph_put_snap_context(snapc); /* page's reference */ 789 790 if (atomic_long_dec_return(&fsc->writeback_count) < 791 CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb)) 792 fsc->write_congested = false; 793 794 return err; 795 } 796 797 static int ceph_writepage(struct page *page, struct writeback_control *wbc) 798 { 799 int err; 800 struct inode *inode = page->mapping->host; 801 BUG_ON(!inode); 802 ihold(inode); 803 804 if (wbc->sync_mode == WB_SYNC_NONE && 805 ceph_inode_to_fs_client(inode)->write_congested) { 806 redirty_page_for_writepage(wbc, page); 807 return AOP_WRITEPAGE_ACTIVATE; 808 } 809 810 folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */ 811 812 err = writepage_nounlock(page, wbc); 813 if (err == -ERESTARTSYS) { 814 /* direct memory reclaimer was killed by SIGKILL. return 0 815 * to prevent caller from setting mapping/page error */ 816 err = 0; 817 } 818 unlock_page(page); 819 iput(inode); 820 return err; 821 } 822 823 /* 824 * async writeback completion handler. 825 * 826 * If we get an error, set the mapping error bit, but not the individual 827 * page error bits. 
 */
static void writepages_finish(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_osd_data *osd_data;
	struct page *page;
	int num_pages, total_pages = 0;
	int i, j;
	int rc = req->r_result;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	unsigned int len = 0;
	bool remove_page;

	doutc(cl, "%llx.%llx rc %d\n", ceph_vinop(inode), rc);
	if (rc < 0) {
		mapping_set_error(mapping, rc);
		ceph_set_error_write(ci);
		if (rc == -EBLOCKLISTED)
			fsc->blocklisted = true;
	} else {
		ceph_clear_error_write(ci);
	}

	/*
	 * We lost the cache cap, need to truncate the page before
	 * it is unlocked, otherwise we'd truncate it later in the
	 * page truncation thread, possibly losing some data that
	 * raced its way in
	 */
	remove_page = !(ceph_caps_issued(ci) &
			(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));

	/* clean all pages */
	for (i = 0; i < req->r_num_ops; i++) {
		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) {
			pr_warn_client(cl,
				"%llx.%llx incorrect op %d req %p index %d tid %llu\n",
				ceph_vinop(inode), req->r_ops[i].op, req, i,
				req->r_tid);
			break;
		}

		osd_data = osd_req_op_extent_osd_data(req, i);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
		len += osd_data->length;
		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		total_pages += num_pages;
		for (j = 0; j < num_pages; j++) {
			page = osd_data->pages[j];
			if (fscrypt_is_bounce_page(page)) {
				page = fscrypt_pagecache_page(page);
				fscrypt_free_bounce_page(osd_data->pages[j]);
				osd_data->pages[j] = page;
			}
			BUG_ON(!page);
			WARN_ON(!PageUptodate(page));

			if (atomic_long_dec_return(&fsc->writeback_count) <
			     CONGESTION_OFF_THRESH(
					fsc->mount_options->congestion_kb))
				fsc->write_congested = false;

			ceph_put_snap_context(detach_page_private(page));
			end_page_writeback(page);
			doutc(cl, "unlocking %p\n", page);

			if (remove_page)
				generic_error_remove_folio(inode->i_mapping,
							   page_folio(page));

			unlock_page(page);
		}
		doutc(cl, "%llx.%llx wrote %llu bytes cleaned %d pages\n",
		      ceph_vinop(inode), osd_data->length,
		      rc >= 0 ? num_pages : 0);
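
		/* Drop the page refs taken when the request was built. */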
		release_pages(osd_data->pages, num_pages);
	}

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, rc);

	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
	ceph_dec_osd_stopping_blocker(fsc->mdsc);
}

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_vino vino = ceph_vino(inode);
	pgoff_t index, start_index, end = -1;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct folio_batch fbatch;
	int rc = 0;
	unsigned int wsize = i_blocksize(inode);
	struct ceph_osd_request *req = NULL;
	struct ceph_writeback_ctl ceph_wbc;
	bool should_loop, range_whole = false;
	bool done = false;
	bool caching = ceph_is_cache_enabled(inode);
	xa_mark_t tag;

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    fsc->write_congested)
		return 0;

	doutc(cl, "%llx.%llx (mode=%s)\n", ceph_vinop(inode),
	      wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	      (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	if (ceph_inode_is_shutdown(inode)) {
		if (ci->i_wrbuffer_ref > 0) {
			pr_warn_ratelimited_client(cl,
				"%llx.%llx %lld forced umount\n",
				ceph_vinop(inode), ceph_ino(inode));
		}
		mapping_set_error(mapping, -EIO);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;

	folio_batch_init(&fbatch);

	start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
	index = start_index;

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
		tag = PAGECACHE_TAG_TOWRITE;
	} else {
		tag = PAGECACHE_TAG_DIRTY;
	}
retry:
	/* find oldest snap context with dirty data */
	snapc = get_oldest_context(inode, &ceph_wbc, NULL);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		doutc(cl, " no snap context with dirty data?\n");
		goto out;
	}
	doutc(cl, " oldest snapc is %p seq %lld (%d snaps)\n", snapc,
	      snapc->seq, snapc->num_snaps);

	should_loop = false;
	if (ceph_wbc.head_snapc && snapc != last_snapc) {
		/* where to start/end? */
		if (wbc->range_cyclic) {
			index = start_index;
			end = -1;
			if (index > 0)
				should_loop = true;
			doutc(cl, " cyclic, start at %lu\n", index);
		} else {
			index = wbc->range_start >> PAGE_SHIFT;
			end = wbc->range_end >> PAGE_SHIFT;
			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
				range_whole = true;
			doutc(cl, " not cyclic, %lu to %lu\n", index, end);
		}
	} else if (!ceph_wbc.head_snapc) {
		/* Do not respect wbc->range_{start,end}. Dirty pages
		 * in that range can be associated with newer snapc.
		 * They are not writeable until all dirty pages
		 * associated with 'snapc' have been written */
		if (index > 0)
			should_loop = true;
		doutc(cl, " non-head snapc, range whole\n");
	}

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);

	ceph_put_snap_context(last_snapc);
	last_snapc = snapc;

	while (!done && index <= end) {
		int num_ops = 0, op_idx;
		unsigned i, nr_folios, max_pages, locked_pages = 0;
		struct page **pages = NULL, **data_pages;
		struct page *page;
		pgoff_t strip_unit_end = 0;
		u64 offset = 0, len = 0;
		bool from_pool = false;

		max_pages = wsize >> PAGE_SHIFT;

get_more_pages:
		nr_folios = filemap_get_folios_tag(mapping, &index,
						   end, tag, &fbatch);
		doutc(cl, "pagevec_lookup_range_tag got %d\n", nr_folios);
		if (!nr_folios && !locked_pages)
			break;
		for (i = 0; i < nr_folios && locked_pages < max_pages; i++) {
			page = &fbatch.folios[i]->page;
			doutc(cl, "? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page); /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				doutc(cl, "!dirty or !mapping %p\n", page);
				unlock_page(page);
				continue;
			}
			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc != snapc) {
				doutc(cl, "page snapc %p %lld != oldest %p %lld\n",
				      pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				if (!should_loop &&
				    !ceph_wbc.head_snapc &&
				    wbc->sync_mode != WB_SYNC_NONE)
					should_loop = true;
				unlock_page(page);
				continue;
			}
			if (page_offset(page) >= ceph_wbc.i_size) {
				struct folio *folio = page_folio(page);

				doutc(cl, "folio at %lu beyond eof %llu\n",
				      folio->index, ceph_wbc.i_size);
				if ((ceph_wbc.size_stable ||
				    folio_pos(folio) >= i_size_read(inode)) &&
				    folio_clear_dirty_for_io(folio))
					folio_invalidate(folio, 0,
							folio_size(folio));
				folio_unlock(folio);
				continue;
			}
			if (strip_unit_end && (page->index > strip_unit_end)) {
				doutc(cl, "end of strip unit %p\n", page);
				unlock_page(page);
				break;
			}
			if (PageWriteback(page) ||
			    PagePrivate2(page) /* [DEPRECATED] */) {
				if (wbc->sync_mode == WB_SYNC_NONE) {
					doutc(cl, "%p under writeback\n", page);
					unlock_page(page);
					continue;
				}
				doutc(cl, "waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
				folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */
			}

			if (!clear_page_dirty_for_io(page)) {
				doutc(cl, "%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				continue;
			}

			/*
			 * We have something to write. If this is
			 * the first locked page this time through,
			 * calculate max possible write size and
			 * allocate a page array
			 */
			if (locked_pages == 0) {
				u64 objnum;
				u64 objoff;
				u32 xlen;

				/* prepare async write request */
				offset = (u64)page_offset(page);
				ceph_calc_file_object_mapping(&ci->i_layout,
							      offset, wsize,
							      &objnum, &objoff,
							      &xlen);
				len = xlen;

				num_ops = 1;
				strip_unit_end = page->index +
					((len - 1) >> PAGE_SHIFT);

				BUG_ON(pages);
				max_pages = calc_pages_for(0, (u64)len);
				pages = kmalloc_array(max_pages,
						      sizeof(*pages),
						      GFP_NOFS);
				if (!pages) {
					from_pool = true;
					pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
					BUG_ON(!pages);
				}

				len = 0;
			} else if (page->index !=
				   (offset + len) >> PAGE_SHIFT) {
				if (num_ops >= (from_pool ? CEPH_OSD_SLAB_OPS :
						CEPH_OSD_MAX_OPS)) {
					redirty_page_for_writepage(wbc, page);
					unlock_page(page);
					break;
				}

				num_ops++;
				offset = (u64)page_offset(page);
				len = 0;
			}

			/* note position of first page in fbatch */
			doutc(cl, "%llx.%llx will write page %p idx %lu\n",
			      ceph_vinop(inode), page, page->index);

			if (atomic_long_inc_return(&fsc->writeback_count) >
			    CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb))
				fsc->write_congested = true;

			if (IS_ENCRYPTED(inode)) {
				pages[locked_pages] =
					fscrypt_encrypt_pagecache_blocks(page,
						PAGE_SIZE, 0,
						locked_pages ? GFP_NOWAIT : GFP_NOFS);
				if (IS_ERR(pages[locked_pages])) {
					if (PTR_ERR(pages[locked_pages]) == -EINVAL)
						pr_err_client(cl,
							"inode->i_blkbits=%hhu\n",
							inode->i_blkbits);
					/* better not fail on first page! */
					BUG_ON(locked_pages == 0);
					pages[locked_pages] = NULL;
					redirty_page_for_writepage(wbc, page);
					unlock_page(page);
					break;
				}
				++locked_pages;
			} else {
				pages[locked_pages++] = page;
			}

			fbatch.folios[i] = NULL;
			len += thp_size(page);
		}

		/* did we get anything?
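		 * (The scan above can bail out early on a trylock failure, a
		 * snapc mismatch or a strip unit boundary, so locked_pages
		 * may be zero even though folios were found.)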
		 */
		if (!locked_pages)
			goto release_folios;
		if (i) {
			unsigned j, n = 0;
			/* shift unused pages to beginning of fbatch */
			for (j = 0; j < nr_folios; j++) {
				if (!fbatch.folios[j])
					continue;
				if (n < j)
					fbatch.folios[n] = fbatch.folios[j];
				n++;
			}
			fbatch.nr = n;

			if (nr_folios && i == nr_folios &&
			    locked_pages < max_pages) {
				doutc(cl, "reached end fbatch, trying for more\n");
				folio_batch_release(&fbatch);
				goto get_more_pages;
			}
		}

new_request:
		offset = ceph_fscrypt_page_offset(pages[0]);
		len = wsize;

		req = ceph_osdc_new_request(&fsc->client->osdc,
					    &ci->i_layout, vino,
					    offset, &len, 0, num_ops,
					    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
					    snapc, ceph_wbc.truncate_seq,
					    ceph_wbc.truncate_size, false);
		if (IS_ERR(req)) {
			req = ceph_osdc_new_request(&fsc->client->osdc,
						    &ci->i_layout, vino,
						    offset, &len, 0,
						    min(num_ops,
							CEPH_OSD_SLAB_OPS),
						    CEPH_OSD_OP_WRITE,
						    CEPH_OSD_FLAG_WRITE,
						    snapc, ceph_wbc.truncate_seq,
						    ceph_wbc.truncate_size, true);
			BUG_ON(IS_ERR(req));
		}
		BUG_ON(len < ceph_fscrypt_page_offset(pages[locked_pages - 1]) +
			     thp_size(pages[locked_pages - 1]) - offset);

		if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
			rc = -EIO;
			goto release_folios;
		}
		req->r_callback = writepages_finish;
		req->r_inode = inode;

		/* Format the osd request message and submit the write */
		len = 0;
		data_pages = pages;
		op_idx = 0;
		for (i = 0; i < locked_pages; i++) {
			struct page *page = ceph_fscrypt_pagecache_page(pages[i]);

			u64 cur_offset = page_offset(page);
			/*
			 * Discontinuity in page range? Ceph can handle that by just passing
			 * multiple extents in the write op.
			 */
			if (offset + len != cur_offset) {
				/* If it's full, stop here */
				if (op_idx + 1 == req->r_num_ops)
					break;

				/* Kick off an fscache write with what we have so far.
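				 * Each completed extent range goes out to the
				 * cache before the next extent is started.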
				 */
				ceph_fscache_write_to_cache(inode, offset, len, caching);

				/* Start a new extent */
				osd_req_op_extent_dup_last(req, op_idx,
							   cur_offset - offset);
				doutc(cl, "got pages at %llu~%llu\n", offset,
				      len);
				osd_req_op_extent_osd_data_pages(req, op_idx,
							data_pages, len, 0,
							from_pool, false);
				osd_req_op_extent_update(req, op_idx, len);

				len = 0;
				offset = cur_offset;
				data_pages = pages + i;
				op_idx++;
			}

			set_page_writeback(page);
			if (caching)
				ceph_set_page_fscache(page);
			len += thp_size(page);
		}
		ceph_fscache_write_to_cache(inode, offset, len, caching);

		if (ceph_wbc.size_stable) {
			len = min(len, ceph_wbc.i_size - offset);
		} else if (i == locked_pages) {
			/* writepages_finish() clears writeback pages
			 * according to the data length, so make sure
			 * data length covers all locked pages */
			u64 min_len = len + 1 - thp_size(page);
			len = get_writepages_data_length(inode, pages[i - 1],
							 offset);
			len = max(len, min_len);
		}
		if (IS_ENCRYPTED(inode))
			len = round_up(len, CEPH_FSCRYPT_BLOCK_SIZE);

		doutc(cl, "got pages at %llu~%llu\n", offset, len);

		if (IS_ENCRYPTED(inode) &&
		    ((offset | len) & ~CEPH_FSCRYPT_BLOCK_MASK))
			pr_warn_client(cl,
				"bad encrypted write offset=%lld len=%llu\n",
				offset, len);

		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
						 0, from_pool, false);
		osd_req_op_extent_update(req, op_idx, len);

		BUG_ON(op_idx + 1 != req->r_num_ops);

		from_pool = false;
		if (i < locked_pages) {
			BUG_ON(num_ops <= req->r_num_ops);
			num_ops -= req->r_num_ops;
			locked_pages -= i;

			/* allocate new pages array for next request */
			data_pages = pages;
			pages = kmalloc_array(locked_pages, sizeof(*pages),
					      GFP_NOFS);
			if (!pages) {
				from_pool = true;
				pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
				BUG_ON(!pages);
			}
			memcpy(pages, data_pages + i,
			       locked_pages * sizeof(*pages));
			memset(data_pages + i, 0,
			       locked_pages * sizeof(*pages));
		} else {
			BUG_ON(num_ops != req->r_num_ops);
			index = pages[i - 1]->index + 1;
			/* request message now owns the pages array */
			pages = NULL;
		}

		req->r_mtime = inode_get_mtime(inode);
		ceph_osdc_start_request(&fsc->client->osdc, req);
		req = NULL;

		wbc->nr_to_write -= i;
		if (pages)
			goto new_request;

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
			done = true;

release_folios:
		doutc(cl, "folio_batch release on %d folios (%p)\n",
		      (int)fbatch.nr, fbatch.nr ? fbatch.folios[0] : NULL);
		folio_batch_release(&fbatch);
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		doutc(cl, "looping back to beginning of file\n");
		end = start_index - 1; /* OK even when start_index == 0 */

		/* to write dirty pages associated with next snapc,
		 * we need to wait until current writes complete */
		if (wbc->sync_mode != WB_SYNC_NONE &&
		    start_index == 0 && /* all dirty pages were checked */
		    !ceph_wbc.head_snapc) {
			struct page *page;
			unsigned i, nr;
			index = 0;
			while ((index <= end) &&
			       (nr = filemap_get_folios_tag(mapping, &index,
						(pgoff_t)-1,
						PAGECACHE_TAG_WRITEBACK,
						&fbatch))) {
				for (i = 0; i < nr; i++) {
					page = &fbatch.folios[i]->page;
					if (page_snap_context(page) != snapc)
						continue;
					wait_on_page_writeback(page);
				}
				folio_batch_release(&fbatch);
				cond_resched();
			}
		}

		start_index = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	ceph_osdc_put_request(req);
	ceph_put_snap_context(last_snapc);
	doutc(cl, "%llx.%llx dend - startone, rc = %d\n", ceph_vinop(inode),
	      rc);
	return rc;
}

/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}

/**
 * ceph_find_incompatible - find an incompatible context and return it
 * @page: page being dirtied
 *
 * We are only allowed to write into/dirty a page if the page is
 * clean, or already dirty within the same snap context. Returns a
 * conflicting context if there is one, NULL if there isn't, or a
 * negative error code on other errors.
 *
 * Must be called with page lock held.
 */
static struct ceph_snap_context *
ceph_find_incompatible(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (ceph_inode_is_shutdown(inode)) {
		doutc(cl, " %llx.%llx page %p is shutdown\n",
		      ceph_vinop(inode), page);
		return ERR_PTR(-ESTALE);
	}

	for (;;) {
		struct ceph_snap_context *snapc, *oldest;

		wait_on_page_writeback(page);

		snapc = page_snap_context(page);
		if (!snapc || snapc == ci->i_head_snapc)
			break;

		/*
		 * this page is already dirty in another (older) snap
		 * context! is it writeable now?
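		 * It is only writeable if it belongs to the oldest snapc
		 * with dirty data; anything newer has to wait.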
		 */
		oldest = get_oldest_context(inode, NULL, NULL);
		if (snapc->seq > oldest->seq) {
			/* not writeable -- return it for the caller to deal with */
			ceph_put_snap_context(oldest);
			doutc(cl, " %llx.%llx page %p snapc %p not current or oldest\n",
			      ceph_vinop(inode), page, snapc);
			return ceph_get_snap_context(snapc);
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		doutc(cl, " %llx.%llx page %p snapc %p not current, but oldest\n",
		      ceph_vinop(inode), page, snapc);
		if (clear_page_dirty_for_io(page)) {
			int r = writepage_nounlock(page, NULL);
			if (r < 0)
				return ERR_PTR(r);
		}
	}
	return NULL;
}

static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct folio **foliop, void **_fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;

	snapc = ceph_find_incompatible(folio_page(*foliop, 0));
	if (snapc) {
		int r;

		folio_unlock(*foliop);
		folio_put(*foliop);
		*foliop = NULL;
		if (IS_ERR(snapc))
			return PTR_ERR(snapc);

		ceph_queue_writeback(inode);
		r = wait_event_killable(ci->i_cap_wq,
					context_is_writeable_or_written(inode, snapc));
		ceph_put_snap_context(snapc);
		return r == 0 ? -EAGAIN : r;
	}
	return 0;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct folio *folio = NULL;
	int r;

	r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, &folio, NULL);
	if (r < 0)
		return r;

	folio_wait_private_2(folio); /* [DEPRECATED] */
	WARN_ON_ONCE(!folio_test_locked(folio));
	*pagep = &folio->page;
	return 0;
}

/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *subpage, void *fsdata)
{
	struct folio *folio = page_folio(subpage);
	struct inode *inode = file_inode(file);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	bool check_cap = false;

	doutc(cl, "%llx.%llx file %p folio %p %d~%d (%d)\n", ceph_vinop(inode),
	      file, folio, (int)pos, (int)copied, (int)len);

	if (!folio_test_uptodate(folio)) {
		/* just return that nothing was copied on a short copy */
		if (copied < len) {
			copied = 0;
			goto out;
		}
		folio_mark_uptodate(folio);
	}

	/* did file size increase?
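	 * If so, update i_size now and let ceph_check_caps() tell the
	 * MDS about the new size.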
	 */
	if (pos+copied > i_size_read(inode))
		check_cap = ceph_inode_set_size(inode, pos+copied);

	folio_mark_dirty(folio);

out:
	folio_unlock(folio);
	folio_put(folio);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY);

	return copied;
}

const struct address_space_operations ceph_aops = {
	.read_folio = netfs_read_folio,
	.readahead = netfs_readahead,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.dirty_folio = ceph_dirty_folio,
	.invalidate_folio = ceph_invalidate_folio,
	.release_folio = netfs_release_folio,
	.direct_IO = noop_direct_IO,
};

static void ceph_block_sigs(sigset_t *oldset)
{
	sigset_t mask;
	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void ceph_restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

/*
 * vm ops
 */
static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	if (ceph_inode_is_shutdown(inode))
		return ret;

	ceph_block_sigs(&oldset);

	doutc(cl, "%llx.%llx %llu trying to get caps\n",
	      ceph_vinop(inode), off);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1, &got);
	if (err < 0)
		goto out_restore;

	doutc(cl, "%llx.%llx %llu got cap refs on %s\n", ceph_vinop(inode),
	      off, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
	    !ceph_has_inline_data(ci)) {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		ceph_add_rw_context(fi, &rw_ctx);
		ret = filemap_fault(vmf);
		ceph_del_rw_context(fi, &rw_ctx);
		doutc(cl, "%llx.%llx %llu drop cap refs %s ret %x\n",
		      ceph_vinop(inode), off, ceph_cap_string(got), ret);
	} else
		err = -EAGAIN;

	ceph_put_cap_refs(ci, got);

	if (err != -EAGAIN)
		goto out_restore;

	/* read inline data */
	if (off >= PAGE_SIZE) {
		/* does not support inline data > PAGE_SIZE */
		ret = VM_FAULT_SIGBUS;
	} else {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;

		filemap_invalidate_lock_shared(mapping);
		page = find_or_create_page(mapping, 0,
				mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out_inline;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0 || off >= i_size_read(inode)) {
			unlock_page(page);
			put_page(page);
			ret = vmf_error(err);
			goto out_inline;
		}
		if (err < PAGE_SIZE)
			zero_user_segment(page, err, PAGE_SIZE);
		else
			flush_dcache_page(page);
		SetPageUptodate(page);
		vmf->page = page;
		ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
		filemap_invalidate_unlock_shared(mapping);
		doutc(cl, "%llx.%llx %llu read inline data ret %x\n",
		      ceph_vinop(inode), off, ret);
	}
out_restore:
	ceph_restore_sigs(&oldset);
	if (err < 0)
		ret = vmf_error(err);

	return ret;
}

static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct ceph_cap_flush *prealloc_cf;
	struct page *page = vmf->page;
	loff_t off = page_offset(page);
	loff_t size = i_size_read(inode);
	size_t len;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	if (ceph_inode_is_shutdown(inode))
		return ret;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return VM_FAULT_OOM;

	sb_start_pagefault(inode->i_sb);
	ceph_block_sigs(&oldset);

	if (off + thp_size(page) <= size)
		len = thp_size(page);
	else
		len = offset_in_thp(page, size);

	doutc(cl, "%llx.%llx %llu~%zd getting caps i_size %llu\n",
	      ceph_vinop(inode), off, len, size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, &got);
	if (err < 0)
		goto out_free;

	doutc(cl, "%llx.%llx %llu~%zd got cap refs on %s\n", ceph_vinop(inode),
	      off, len, ceph_cap_string(got));

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);
	inode_inc_iversion_raw(inode);

	do {
		struct ceph_snap_context *snapc;

		lock_page(page);

		if (page_mkwrite_check_truncate(page, inode) < 0) {
			unlock_page(page);
			ret = VM_FAULT_NOPAGE;
			break;
		}

		snapc = ceph_find_incompatible(page);
		if (!snapc) {
			/* success. we'll keep the page locked.
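			 * The page is dirtied under the current (head)
			 * snap context before the fault returns.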
			 */
			set_page_dirty(page);
			ret = VM_FAULT_LOCKED;
			break;
		}

		unlock_page(page);

		if (IS_ERR(snapc)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}

		ceph_queue_writeback(inode);
		err = wait_event_killable(ci->i_cap_wq,
				context_is_writeable_or_written(inode, snapc));
		ceph_put_snap_context(snapc);
	} while (err == 0);

	if (ret == VM_FAULT_LOCKED) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	doutc(cl, "%llx.%llx %llu~%zd dropping cap refs on %s ret %x\n",
	      ceph_vinop(inode), off, len, ceph_cap_string(got), ret);
	ceph_put_cap_refs_async(ci, got);
out_free:
	ceph_restore_sigs(&oldset);
	sb_end_pagefault(inode->i_sb);
	ceph_free_cap_flush(prealloc_cf);
	if (err < 0)
		ret = vmf_error(err);
	return ret;
}

void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
			   char *data, size_t len)
{
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (locked_page) {
		page = locked_page;
	} else {
		if (i_size_read(inode) == 0)
			return;
		page = find_or_create_page(mapping, 0,
					   mapping_gfp_constraint(mapping,
					   ~__GFP_FS));
		if (!page)
			return;
		if (PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return;
		}
	}

	doutc(cl, "%p %llx.%llx len %zu locked_page %p\n", inode,
	      ceph_vinop(inode), len, locked_page);

	if (len > 0) {
		void *kaddr = kmap_atomic(page);
		memcpy(kaddr, data, len);
		kunmap_atomic(kaddr);
	}

	if (page != locked_page) {
		if (len < PAGE_SIZE)
			zero_user_segment(page, len, PAGE_SIZE);
		else
			flush_dcache_page(page);

		SetPageUptodate(page);
		unlock_page(page);
		put_page(page);
	}
}

int ceph_uninline_data(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_osd_request *req = NULL;
	struct ceph_cap_flush *prealloc_cf = NULL;
	struct folio *folio = NULL;
	u64 inline_version = CEPH_INLINE_NONE;
	struct page *pages[1];
	int err = 0;
	u64 len;

	spin_lock(&ci->i_ceph_lock);
	inline_version = ci->i_inline_version;
	spin_unlock(&ci->i_ceph_lock);

	doutc(cl, "%llx.%llx inline_version %llu\n", ceph_vinop(inode),
	      inline_version);

	if (ceph_inode_is_shutdown(inode)) {
		err = -EIO;
		goto out;
	}

	if (inline_version == CEPH_INLINE_NONE)
		return 0;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	if (inline_version == 1) /* initial version, no data */
		goto out_uninline;

	folio = read_mapping_folio(inode->i_mapping, 0, file);
	if (IS_ERR(folio)) {
		err = PTR_ERR(folio);
		goto out;
	}

	folio_lock(folio);

	len = i_size_read(inode);
	if (len > folio_size(folio))
		len = folio_size(folio);

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 0, 1,
				    CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
int ceph_uninline_data(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_osd_request *req = NULL;
	struct ceph_cap_flush *prealloc_cf = NULL;
	struct folio *folio = NULL;
	u64 inline_version = CEPH_INLINE_NONE;
	struct page *pages[1];
	int err = 0;
	u64 len;

	spin_lock(&ci->i_ceph_lock);
	inline_version = ci->i_inline_version;
	spin_unlock(&ci->i_ceph_lock);

	doutc(cl, "%llx.%llx inline_version %llu\n", ceph_vinop(inode),
	      inline_version);

	if (ceph_inode_is_shutdown(inode)) {
		err = -EIO;
		goto out;
	}

	if (inline_version == CEPH_INLINE_NONE)
		return 0;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	if (inline_version == 1) /* initial version, no data */
		goto out_uninline;

	folio = read_mapping_folio(inode->i_mapping, 0, file);
	if (IS_ERR(folio)) {
		err = PTR_ERR(folio);
		goto out;
	}

	folio_lock(folio);

	len = i_size_read(inode);
	if (len > folio_size(folio))
		len = folio_size(folio);

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 0, 1,
				    CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_unlock;
	}

	req->r_mtime = inode_get_mtime(inode);
	ceph_osdc_start_request(&fsc->client->osdc, req);
	err = ceph_osdc_wait_request(&fsc->client->osdc, req);
	ceph_osdc_put_request(req);
	if (err < 0)
		goto out_unlock;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 1, 3,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    NULL, ci->i_truncate_seq,
				    ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_unlock;
	}

	pages[0] = folio_page(folio, 0);
	osd_req_op_extent_osd_data_pages(req, 1, pages, len, 0, false, false);

	{
		__le64 xattr_buf = cpu_to_le64(inline_version);

		err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
					    "inline_version", &xattr_buf,
					    sizeof(xattr_buf),
					    CEPH_OSD_CMPXATTR_OP_GT,
					    CEPH_OSD_CMPXATTR_MODE_U64);
		if (err)
			goto out_put_req;
	}

	{
		char xattr_buf[32];
		int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
					 "%llu", inline_version);

		err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
					    "inline_version",
					    xattr_buf, xattr_len, 0, 0);
		if (err)
			goto out_put_req;
	}

	req->r_mtime = inode_get_mtime(inode);
	ceph_osdc_start_request(&fsc->client->osdc, req);
	err = ceph_osdc_wait_request(&fsc->client->osdc, req);

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, err);

out_uninline:
	if (!err) {
		int dirty;

		/* Set to CEPH_INLINE_NONE and dirty the caps */
		down_read(&fsc->mdsc->snap_rwsem);
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		up_read(&fsc->mdsc->snap_rwsem);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}
out_put_req:
	ceph_osdc_put_request(req);
	if (err == -ECANCELED)
		err = 0;
out_unlock:
	if (folio) {
		folio_unlock(folio);
		folio_put(folio);
	}
out:
	ceph_free_cap_flush(prealloc_cf);
	doutc(cl, "%llx.%llx inline_version %llu = %d\n",
	      ceph_vinop(inode), inline_version, err);
	return err;
}
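
/*
 * mmap() support.  Faults are handled by ceph_filemap_fault() and the
 * first write to a page in a shared writable mapping goes through
 * ceph_page_mkwrite() above.  ceph_mmap() is wired up as the ->mmap
 * handler in the ceph file_operations (see file.c); it refuses mappings
 * when the address_space has no ->read_folio, since servicing a fault
 * requires reading the page back from the OSDs.
 */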
static const struct vm_operations_struct ceph_vmops = {
	.fault		= ceph_filemap_fault,
	.page_mkwrite	= ceph_page_mkwrite,
};

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->read_folio)
		return -ENOEXEC;
	vma->vm_ops = &ceph_vmops;
	return 0;
}

enum {
	POOL_READ	= 1,
	POOL_WRITE	= 2,
};

/*
 * Probe this client's access rights for a data pool by issuing a
 * CEPH_OSD_OP_STAT read and an exclusive CEPH_OSD_OP_CREATE write
 * against the file's first object.  -ENOENT still proves read access
 * and -EEXIST still proves write access; only -EPERM means the
 * permission is missing.  Results are cached in mdsc->pool_perm_tree,
 * keyed by pool id and pool namespace.
 */
static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
				s64 pool, struct ceph_string *pool_ns)
{
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(&ci->netfs.inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_client *cl = fsc->client;
	struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
	struct rb_node **p, *parent;
	struct ceph_pool_perm *perm;
	struct page **pages;
	size_t pool_ns_len;
	int err = 0, err2 = 0, have = 0;

	down_read(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	while (*p) {
		perm = rb_entry(*p, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	up_read(&mdsc->pool_perm_rwsem);
	if (*p)
		goto out;

	if (pool_ns)
		doutc(cl, "pool %lld ns %.*s no perm cached\n", pool,
		      (int)pool_ns->len, pool_ns->str);
	else
		doutc(cl, "pool %lld no perm cached\n", pool);

	down_write(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		perm = rb_entry(parent, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	if (*p) {
		up_write(&mdsc->pool_perm_rwsem);
		goto out;
	}

	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!rd_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	rd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
	rd_req->r_base_oloc.pool = pool;
	if (pool_ns)
		rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
	ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);

	err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!wr_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
	osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
	ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
	ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);

	err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	/* one page should be large enough for STAT data */
	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		goto out_unlock;
	}

	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
				     0, false, true);
	ceph_osdc_start_request(&fsc->client->osdc, rd_req);

	wr_req->r_mtime = inode_get_mtime(&ci->netfs.inode);
	ceph_osdc_start_request(&fsc->client->osdc, wr_req);

	err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
	err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);

	if (err >= 0 || err == -ENOENT)
		have |= POOL_READ;
	else if (err != -EPERM) {
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		goto out_unlock;
	}

	if (err2 == 0 || err2 == -EEXIST)
		have |= POOL_WRITE;
	else if (err2 != -EPERM) {
		if (err2 == -EBLOCKLISTED)
			fsc->blocklisted = true;
		err = err2;
		goto out_unlock;
	}

	pool_ns_len = pool_ns ? pool_ns->len : 0;
	perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
	if (!perm) {
		err = -ENOMEM;
		goto out_unlock;
	}

	perm->pool = pool;
	perm->perm = have;
	perm->pool_ns_len = pool_ns_len;
	if (pool_ns_len > 0)
		memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
	perm->pool_ns[pool_ns_len] = 0;

	rb_link_node(&perm->node, parent, p);
	rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
	err = 0;
out_unlock:
	up_write(&mdsc->pool_perm_rwsem);

	ceph_osdc_put_request(rd_req);
	ceph_osdc_put_request(wr_req);
out:
	if (!err)
		err = have;
	if (pool_ns)
		doutc(cl, "pool %lld ns %.*s result = %d\n", pool,
		      (int)pool_ns->len, pool_ns->str, err);
	else
		doutc(cl, "pool %lld result = %d\n", pool, err);
	return err;
}
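
/*
 * Check whether the data pool (and namespace) named in the file layout
 * is readable and/or writable by this client.  The result of
 * __ceph_pool_perm_get() is cached in i_ceph_flags as CEPH_I_POOL_PERM
 * plus CEPH_I_POOL_RD/CEPH_I_POOL_WR, then re-validated under
 * i_ceph_lock: if the layout changed while the probe was in flight, the
 * check restarts against the new pool.
 */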
int ceph_pool_perm_check(struct inode *inode, int need)
{
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_string *pool_ns;
	s64 pool;
	int ret, flags;

	/* Only need to do this for regular files */
	if (!S_ISREG(inode->i_mode))
		return 0;

	if (ci->i_vino.snap != CEPH_NOSNAP) {
		/*
		 * Pool permission check needs to write to the first object.
		 * But for snapshot, head of the first object may have
		 * already been deleted. Skip check to avoid creating orphan
		 * object.
		 */
		return 0;
	}

	if (ceph_test_mount_opt(ceph_inode_to_fs_client(inode),
				NOPOOLPERM))
		return 0;

	spin_lock(&ci->i_ceph_lock);
	flags = ci->i_ceph_flags;
	pool = ci->i_layout.pool_id;
	spin_unlock(&ci->i_ceph_lock);
check:
	if (flags & CEPH_I_POOL_PERM) {
		if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
			doutc(cl, "pool %lld no read perm\n", pool);
			return -EPERM;
		}
		if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
			doutc(cl, "pool %lld no write perm\n", pool);
			return -EPERM;
		}
		return 0;
	}

	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
	ret = __ceph_pool_perm_get(ci, pool, pool_ns);
	ceph_put_string(pool_ns);
	if (ret < 0)
		return ret;

	flags = CEPH_I_POOL_PERM;
	if (ret & POOL_READ)
		flags |= CEPH_I_POOL_RD;
	if (ret & POOL_WRITE)
		flags |= CEPH_I_POOL_WR;

	spin_lock(&ci->i_ceph_lock);
	if (pool == ci->i_layout.pool_id &&
	    pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
		ci->i_ceph_flags |= flags;
	} else {
		pool = ci->i_layout.pool_id;
		flags = ci->i_ceph_flags;
	}
	spin_unlock(&ci->i_ceph_lock);
	goto check;
}

void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
{
	struct ceph_pool_perm *perm;
	struct rb_node *n;

	while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
		n = rb_first(&mdsc->pool_perm_tree);
		perm = rb_entry(n, struct ceph_pool_perm, node);
		rb_erase(n, &mdsc->pool_perm_tree);
		kfree(perm);
	}
}