// SPDX-License-Identifier: GPL-2.0-or-later
/* netfs cookie management
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See Documentation/filesystems/caching/netfs-api.rst for more information on
 * the netfs API.
 */

#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/slab.h>
#include "internal.h"

struct kmem_cache *fscache_cookie_jar;

static void fscache_cookie_lru_timed_out(struct timer_list *timer);
static void fscache_cookie_lru_worker(struct work_struct *work);
static void fscache_cookie_worker(struct work_struct *work);
static void fscache_unhash_cookie(struct fscache_cookie *cookie);
static void fscache_perform_invalidation(struct fscache_cookie *cookie);

#define fscache_cookie_hash_shift 15
static struct hlist_bl_head fscache_cookie_hash[1 << fscache_cookie_hash_shift];
static LIST_HEAD(fscache_cookies);
static DEFINE_RWLOCK(fscache_cookies_lock);
static LIST_HEAD(fscache_cookie_lru);
static DEFINE_SPINLOCK(fscache_cookie_lru_lock);
DEFINE_TIMER(fscache_cookie_lru_timer, fscache_cookie_lru_timed_out);
static DECLARE_WORK(fscache_cookie_lru_work, fscache_cookie_lru_worker);
static const char fscache_cookie_states[FSCACHE_COOKIE_STATE__NR] = "-LCAIFUWRD";
static unsigned int fscache_lru_cookie_timeout = 10 * HZ;

void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
{
	const u8 *k;

	pr_err("%c-cookie c=%08x [fl=%lx na=%u nA=%u s=%c]\n",
	       prefix,
	       cookie->debug_id,
	       cookie->flags,
	       atomic_read(&cookie->n_active),
	       atomic_read(&cookie->n_accesses),
	       fscache_cookie_states[cookie->state]);
	pr_err("%c-cookie V=%08x [%s]\n",
	       prefix,
	       cookie->volume->debug_id,
	       cookie->volume->key);

	k = (cookie->key_len <= sizeof(cookie->inline_key)) ?
		cookie->inline_key : cookie->key;
	pr_err("%c-key=[%u] '%*phN'\n", prefix, cookie->key_len, cookie->key_len, k);
}

static void fscache_free_cookie(struct fscache_cookie *cookie)
{
	if (WARN_ON_ONCE(!list_empty(&cookie->commit_link))) {
		spin_lock(&fscache_cookie_lru_lock);
		list_del_init(&cookie->commit_link);
		spin_unlock(&fscache_cookie_lru_lock);
		fscache_stat_d(&fscache_n_cookies_lru);
		fscache_stat(&fscache_n_cookies_lru_removed);
	}

	if (WARN_ON_ONCE(test_bit(FSCACHE_COOKIE_IS_HASHED, &cookie->flags))) {
		fscache_print_cookie(cookie, 'F');
		return;
	}

	write_lock(&fscache_cookies_lock);
	list_del(&cookie->proc_link);
	write_unlock(&fscache_cookies_lock);
	if (cookie->aux_len > sizeof(cookie->inline_aux))
		kfree(cookie->aux);
	if (cookie->key_len > sizeof(cookie->inline_key))
		kfree(cookie->key);
	fscache_stat_d(&fscache_n_cookies);
	kmem_cache_free(fscache_cookie_jar, cookie);
}

static void __fscache_queue_cookie(struct fscache_cookie *cookie)
{
	if (!queue_work(fscache_wq, &cookie->work))
		fscache_put_cookie(cookie, fscache_cookie_put_over_queued);
}

static void fscache_queue_cookie(struct fscache_cookie *cookie,
				 enum fscache_cookie_trace where)
{
	fscache_get_cookie(cookie, where);
	__fscache_queue_cookie(cookie);
}
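
/*
 * Note on the queueing helpers above: fscache_queue_cookie() takes a
 * reference before queueing the cookie's work item, and
 * __fscache_queue_cookie() drops it again if the work item was already
 * queued, so the worker always holds exactly one reference on the cookie
 * whilst the state machine runs.
 */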

/*
 * Initialise the access gate on a cookie by setting a flag to prevent the
 * state machine from being queued when the access counter transitions to 0.
 * We're only interested in this when we withdraw caching services from the
 * cookie.
 */
static void fscache_init_access_gate(struct fscache_cookie *cookie)
{
	int n_accesses;

	n_accesses = atomic_read(&cookie->n_accesses);
	trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
			     n_accesses, fscache_access_cache_pin);
	set_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags);
}

/**
 * fscache_end_cookie_access - Unpin a cache at the end of an access.
 * @cookie: A data file cookie
 * @why: An indication of the circumstances of the access for tracing
 *
 * Unpin a cache cookie after we've accessed it and bring a deferred
 * relinquishment or withdrawal state into effect.
 *
 * The @why indicator is provided for tracing purposes.
 */
void fscache_end_cookie_access(struct fscache_cookie *cookie,
			       enum fscache_access_trace why)
{
	int n_accesses;

	smp_mb__before_atomic();
	n_accesses = atomic_dec_return(&cookie->n_accesses);
	trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
			     n_accesses, why);
	if (n_accesses == 0 &&
	    !test_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags))
		fscache_queue_cookie(cookie, fscache_cookie_get_end_access);
}
EXPORT_SYMBOL(fscache_end_cookie_access);
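
/*
 * Illustrative sketch (not a definitive call sequence): internal users such
 * as the fscache I/O paths pair fscache_begin_cookie_access() (defined below)
 * with fscache_end_cookie_access(), e.g.:
 *
 *	if (fscache_begin_cookie_access(cookie, fscache_access_io_read)) {
 *		... perform the cache operation ...
 *		fscache_end_cookie_access(cookie, fscache_access_io_end);
 *	}
 *
 * The trace values shown are only examples of enum fscache_access_trace;
 * callers pass whichever value best describes their circumstances.
 */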

/*
 * Pin the cache behind a cookie so that we can access it.
 */
static void __fscache_begin_cookie_access(struct fscache_cookie *cookie,
					  enum fscache_access_trace why)
{
	int n_accesses;

	n_accesses = atomic_inc_return(&cookie->n_accesses);
	smp_mb__after_atomic(); /* (Future) read state after is-caching.
				 * Reread n_accesses after is-caching
				 */
	trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
			     n_accesses, why);
}

/**
 * fscache_begin_cookie_access - Pin a cache so data can be accessed
 * @cookie: A data file cookie
 * @why: An indication of the circumstances of the access for tracing
 *
 * Attempt to pin the cache to prevent it from going away whilst we're
 * accessing data, returning true if successful.  This works as follows:
 *
 * (1) If the cookie is not being cached (ie. FSCACHE_COOKIE_IS_CACHING is not
 *     set), we return false to indicate access was not permitted.
 *
 * (2) If the cookie is being cached, we increment its n_accesses count and
 *     then recheck the IS_CACHING flag, ending the access if it got cleared.
 *
 * (3) When we end the access, we decrement the cookie's n_accesses and wake
 *     up any waiters if it reaches 0.
 *
 * (4) Whilst the cookie is actively being cached, its n_accesses is kept
 *     artificially incremented to prevent wakeups from happening.
 *
 * (5) When the cache is taken offline or if the cookie is culled, the flag is
 *     cleared to prevent new accesses, the cookie's n_accesses is decremented
 *     and we wait for it to become 0.
 *
 * The @why indicator is merely provided for tracing purposes.
 */
bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
				 enum fscache_access_trace why)
{
	if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags))
		return false;
	__fscache_begin_cookie_access(cookie, why);
	if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags) ||
	    !fscache_cache_is_live(cookie->volume->cache)) {
		fscache_end_cookie_access(cookie, fscache_access_unlive);
		return false;
	}
	return true;
}

static inline void wake_up_cookie_state(struct fscache_cookie *cookie)
{
	/* Use a barrier to ensure that waiters see the state variable
	 * change, as spin_unlock doesn't guarantee a barrier.
	 *
	 * See comments over wake_up_bit() and waitqueue_active().
	 */
	smp_mb();
	wake_up_var(&cookie->state);
}

/*
 * Change the state a cookie is at and wake up anyone waiting for that.  Impose
 * an ordering between the stuff stored in the cookie and the state member.
 * Paired with fscache_cookie_state().
 */
static void __fscache_set_cookie_state(struct fscache_cookie *cookie,
				       enum fscache_cookie_state state)
{
	smp_store_release(&cookie->state, state);
}

static void fscache_set_cookie_state(struct fscache_cookie *cookie,
				     enum fscache_cookie_state state)
{
	spin_lock(&cookie->lock);
	__fscache_set_cookie_state(cookie, state);
	spin_unlock(&cookie->lock);
	wake_up_cookie_state(cookie);
}
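
/*
 * Note on ordering: the smp_store_release() in __fscache_set_cookie_state()
 * pairs with the smp_load_acquire() in fscache_cookie_state(), so a thread
 * that observes a new state also sees everything stored in the cookie before
 * the transition.  Waiters sleep on the state with wait_var_event(), which is
 * why wake_up_cookie_state() must only be called once the new state has been
 * published.
 */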

/**
 * fscache_cookie_lookup_negative - Note negative lookup
 * @cookie: The cookie that was being looked up
 *
 * Note that some part of the metadata path in the cache doesn't exist and so
 * we can release any waiting readers in the certain knowledge that there's
 * nothing for them to actually read.
 *
 * This function uses no locking and must only be called from the state machine.
 */
void fscache_cookie_lookup_negative(struct fscache_cookie *cookie)
{
	set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_CREATING);
}
EXPORT_SYMBOL(fscache_cookie_lookup_negative);

/**
 * fscache_resume_after_invalidation - Allow I/O to resume after invalidation
 * @cookie: The cookie that was invalidated
 *
 * Tell fscache that invalidation is sufficiently complete that I/O can be
 * allowed again.
 */
void fscache_resume_after_invalidation(struct fscache_cookie *cookie)
{
	fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_ACTIVE);
}
EXPORT_SYMBOL(fscache_resume_after_invalidation);

/**
 * fscache_caching_failed - Report that a failure stopped caching on a cookie
 * @cookie: The cookie that was affected
 *
 * Tell fscache that caching on a cookie needs to be stopped due to some sort
 * of failure.
 *
 * This function uses no locking and must only be called from the state machine.
 */
void fscache_caching_failed(struct fscache_cookie *cookie)
{
	clear_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags);
	fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_FAILED);
	trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
			     fscache_cookie_failed);
}
EXPORT_SYMBOL(fscache_caching_failed);

/*
 * Set the index key in a cookie.  The cookie struct has space for a 16-byte
 * key plus length and hash, but if that's not big enough, it's instead a
 * pointer to a buffer containing 3 bytes of hash, 1 byte of length and then
 * the key data.
 */
static int fscache_set_key(struct fscache_cookie *cookie,
			   const void *index_key, size_t index_key_len)
{
	void *buf;
	size_t buf_size;

	buf_size = round_up(index_key_len, sizeof(__le32));

	if (index_key_len > sizeof(cookie->inline_key)) {
		buf = kzalloc(buf_size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		cookie->key = buf;
	} else {
		buf = cookie->inline_key;
	}

	memcpy(buf, index_key, index_key_len);
	cookie->key_hash = fscache_hash(cookie->volume->key_hash,
					buf, buf_size);
	return 0;
}
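
/*
 * Note that the key buffer is rounded up to a whole number of __le32 words
 * and any padding is already zeroed (the cookie comes from
 * kmem_cache_zalloc() and the out-of-line buffer from kzalloc()), so hashing
 * over buf_size rather than index_key_len gives a stable hash for keys that
 * don't fill the last word.
 */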

static bool fscache_cookie_same(const struct fscache_cookie *a,
				const struct fscache_cookie *b)
{
	const void *ka, *kb;

	if (a->key_hash != b->key_hash ||
	    a->volume != b->volume ||
	    a->key_len != b->key_len)
		return false;

	if (a->key_len <= sizeof(a->inline_key)) {
		ka = &a->inline_key;
		kb = &b->inline_key;
	} else {
		ka = a->key;
		kb = b->key;
	}
	return memcmp(ka, kb, a->key_len) == 0;
}

static atomic_t fscache_cookie_debug_id = ATOMIC_INIT(1);

/*
 * Allocate a cookie.
 */
static struct fscache_cookie *fscache_alloc_cookie(
	struct fscache_volume *volume,
	u8 advice,
	const void *index_key, size_t index_key_len,
	const void *aux_data, size_t aux_data_len,
	loff_t object_size)
{
	struct fscache_cookie *cookie;

	/* allocate and initialise a cookie */
	cookie = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL);
	if (!cookie)
		return NULL;
	fscache_stat(&fscache_n_cookies);

	cookie->volume = volume;
	cookie->advice = advice;
	cookie->key_len = index_key_len;
	cookie->aux_len = aux_data_len;
	cookie->object_size = object_size;
	if (object_size == 0)
		__set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);

	if (fscache_set_key(cookie, index_key, index_key_len) < 0)
		goto nomem;

	if (cookie->aux_len <= sizeof(cookie->inline_aux)) {
		memcpy(cookie->inline_aux, aux_data, cookie->aux_len);
	} else {
		cookie->aux = kmemdup(aux_data, cookie->aux_len, GFP_KERNEL);
		if (!cookie->aux)
			goto nomem;
	}

	refcount_set(&cookie->ref, 1);
	cookie->debug_id = atomic_inc_return(&fscache_cookie_debug_id);
	spin_lock_init(&cookie->lock);
	INIT_LIST_HEAD(&cookie->commit_link);
	INIT_WORK(&cookie->work, fscache_cookie_worker);
	__fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);

	write_lock(&fscache_cookies_lock);
	list_add_tail(&cookie->proc_link, &fscache_cookies);
	write_unlock(&fscache_cookies_lock);
	fscache_see_cookie(cookie, fscache_cookie_new_acquire);
	return cookie;

nomem:
	fscache_free_cookie(cookie);
	return NULL;
}

static inline bool fscache_cookie_is_dropped(struct fscache_cookie *cookie)
{
	return READ_ONCE(cookie->state) == FSCACHE_COOKIE_STATE_DROPPED;
}

static void fscache_wait_on_collision(struct fscache_cookie *candidate,
				      struct fscache_cookie *wait_for)
{
	enum fscache_cookie_state *statep = &wait_for->state;

	wait_var_event_timeout(statep, fscache_cookie_is_dropped(wait_for),
			       20 * HZ);
	if (!fscache_cookie_is_dropped(wait_for)) {
		pr_notice("Potential collision c=%08x old: c=%08x",
			  candidate->debug_id, wait_for->debug_id);
		wait_var_event(statep, fscache_cookie_is_dropped(wait_for));
	}
}
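
/*
 * The bounded wait above gives the old, relinquished cookie 20 seconds to
 * reach the DROPPED state before a potential collision is logged; after that
 * the wait becomes unbounded, and fscache_hash_cookie() does not return until
 * the old cookie has been dropped.
 */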

/*
 * Attempt to insert the new cookie into the hash.  If there's a collision, we
 * wait for the old cookie to complete if it's being relinquished and fail
 * otherwise.
 */
static bool fscache_hash_cookie(struct fscache_cookie *candidate)
{
	struct fscache_cookie *cursor, *wait_for = NULL;
	struct hlist_bl_head *h;
	struct hlist_bl_node *p;
	unsigned int bucket;

	bucket = candidate->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1);
	h = &fscache_cookie_hash[bucket];

	hlist_bl_lock(h);
	hlist_bl_for_each_entry(cursor, p, h, hash_link) {
		if (fscache_cookie_same(candidate, cursor)) {
			if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cursor->flags))
				goto collision;
			wait_for = fscache_get_cookie(cursor,
						      fscache_cookie_get_hash_collision);
			break;
		}
	}

	fscache_get_volume(candidate->volume, fscache_volume_get_cookie);
	atomic_inc(&candidate->volume->n_cookies);
	hlist_bl_add_head(&candidate->hash_link, h);
	set_bit(FSCACHE_COOKIE_IS_HASHED, &candidate->flags);
	hlist_bl_unlock(h);

	if (wait_for) {
		fscache_wait_on_collision(candidate, wait_for);
		fscache_put_cookie(wait_for, fscache_cookie_put_hash_collision);
	}
	return true;

collision:
	trace_fscache_cookie(cursor->debug_id, refcount_read(&cursor->ref),
			     fscache_cookie_collision);
	pr_err("Duplicate cookie detected\n");
	fscache_print_cookie(cursor, 'O');
	fscache_print_cookie(candidate, 'N');
	hlist_bl_unlock(h);
	return false;
}

/*
 * Request a cookie to represent a data storage object within a volume.
 *
 * We never let on to the netfs about errors.  We may set a negative cookie
 * pointer, but that's okay.
 */
struct fscache_cookie *__fscache_acquire_cookie(
	struct fscache_volume *volume,
	u8 advice,
	const void *index_key, size_t index_key_len,
	const void *aux_data, size_t aux_data_len,
	loff_t object_size)
{
	struct fscache_cookie *cookie;

	_enter("V=%x", volume->debug_id);

	if (!index_key || !index_key_len || index_key_len > 255 || aux_data_len > 255)
		return NULL;
	if (!aux_data || !aux_data_len) {
		aux_data = NULL;
		aux_data_len = 0;
	}

	fscache_stat(&fscache_n_acquires);

	cookie = fscache_alloc_cookie(volume, advice,
				      index_key, index_key_len,
				      aux_data, aux_data_len,
				      object_size);
	if (!cookie) {
		fscache_stat(&fscache_n_acquires_oom);
		return NULL;
	}

	if (!fscache_hash_cookie(cookie)) {
		fscache_see_cookie(cookie, fscache_cookie_discard);
		fscache_free_cookie(cookie);
		return NULL;
	}

	trace_fscache_acquire(cookie);
	fscache_stat(&fscache_n_acquires_ok);
	_leave(" = c=%08x", cookie->debug_id);
	return cookie;
}
EXPORT_SYMBOL(__fscache_acquire_cookie);
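
/*
 * Illustrative sketch (hypothetical netfs code, not part of this file): a
 * filesystem normally reaches the function above through the
 * fscache_acquire_cookie() wrapper in linux/fscache.h, passing a key that
 * uniquely identifies the file within the volume and auxiliary data that is
 * later used for coherency checking, e.g.:
 *
 *	inode->cookie = fscache_acquire_cookie(volume, 0,
 *					       &file_handle, sizeof(file_handle),
 *					       &change_attr, sizeof(change_attr),
 *					       i_size_read(&inode->vfs_inode));
 *
 * "file_handle" and "change_attr" stand in for whatever per-file identifier
 * and coherency datum the netfs uses.  A NULL return just means the object
 * won't be cached.
 */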

/*
 * Prepare a cache object to be written to.
 */
static void fscache_prepare_to_write(struct fscache_cookie *cookie)
{
	cookie->volume->cache->ops->prepare_to_write(cookie);
}

/*
 * Look up a cookie in the cache.
 */
static void fscache_perform_lookup(struct fscache_cookie *cookie)
{
	enum fscache_access_trace trace = fscache_access_lookup_cookie_end_failed;
	bool need_withdraw = false;

	_enter("");

	if (!cookie->volume->cache_priv) {
		fscache_create_volume(cookie->volume, true);
		if (!cookie->volume->cache_priv) {
			fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
			goto out;
		}
	}

	if (!cookie->volume->cache->ops->lookup_cookie(cookie)) {
		if (cookie->state != FSCACHE_COOKIE_STATE_FAILED)
			fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
		need_withdraw = true;
		_leave(" [fail]");
		goto out;
	}

	fscache_see_cookie(cookie, fscache_cookie_see_active);
	spin_lock(&cookie->lock);
	if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
		__fscache_set_cookie_state(cookie,
					   FSCACHE_COOKIE_STATE_INVALIDATING);
	else
		__fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_ACTIVE);
	spin_unlock(&cookie->lock);
	wake_up_cookie_state(cookie);
	trace = fscache_access_lookup_cookie_end;

out:
	fscache_end_cookie_access(cookie, trace);
	if (need_withdraw)
		fscache_withdraw_cookie(cookie);
	fscache_end_volume_access(cookie->volume, cookie, trace);
}

/*
 * Begin the process of looking up a cookie.  We offload the actual process to
 * a worker thread.
 */
static bool fscache_begin_lookup(struct fscache_cookie *cookie, bool will_modify)
{
	if (will_modify) {
		set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags);
		set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
	}
	if (!fscache_begin_volume_access(cookie->volume, cookie,
					 fscache_access_lookup_cookie))
		return false;

	__fscache_begin_cookie_access(cookie, fscache_access_lookup_cookie);
	__fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_LOOKING_UP);
	set_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags);
	set_bit(FSCACHE_COOKIE_HAS_BEEN_CACHED, &cookie->flags);
	return true;
}
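
/*
 * Note that fscache_begin_lookup() only switches the cookie to LOOKING_UP and
 * pins the volume and cookie; the actual on-disk lookup is performed later by
 * fscache_perform_lookup() on the cookie worker, so it does not happen in the
 * caller's context.
 */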

/*
 * Start using the cookie for I/O.  This prevents the backing object from being
 * reaped by VM pressure.
 */
void __fscache_use_cookie(struct fscache_cookie *cookie, bool will_modify)
{
	enum fscache_cookie_state state;
	bool queue = false;
	int n_active;

	_enter("c=%08x", cookie->debug_id);

	if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
		 "Trying to use relinquished cookie\n"))
		return;

	spin_lock(&cookie->lock);

	n_active = atomic_inc_return(&cookie->n_active);
	trace_fscache_active(cookie->debug_id, refcount_read(&cookie->ref),
			     n_active, atomic_read(&cookie->n_accesses),
			     will_modify ?
			     fscache_active_use_modify : fscache_active_use);

again:
	state = fscache_cookie_state(cookie);
	switch (state) {
	case FSCACHE_COOKIE_STATE_QUIESCENT:
		queue = fscache_begin_lookup(cookie, will_modify);
		break;

	case FSCACHE_COOKIE_STATE_LOOKING_UP:
	case FSCACHE_COOKIE_STATE_CREATING:
		if (will_modify)
			set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags);
		break;
	case FSCACHE_COOKIE_STATE_ACTIVE:
	case FSCACHE_COOKIE_STATE_INVALIDATING:
		if (will_modify &&
		    !test_and_set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags)) {
			set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
			queue = true;
		}
		/*
		 * We could race with cookie_lru which may set LRU_DISCARD bit
		 * but has yet to run the cookie state machine.  If this happens
		 * and another thread tries to use the cookie, clear LRU_DISCARD
		 * so we don't end up withdrawing the cookie while in use.
		 */
		if (test_and_clear_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags))
			fscache_see_cookie(cookie, fscache_cookie_see_lru_discard_clear);
		break;

	case FSCACHE_COOKIE_STATE_FAILED:
	case FSCACHE_COOKIE_STATE_WITHDRAWING:
		break;

	case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
		spin_unlock(&cookie->lock);
		wait_var_event(&cookie->state,
			       fscache_cookie_state(cookie) !=
			       FSCACHE_COOKIE_STATE_LRU_DISCARDING);
		spin_lock(&cookie->lock);
		goto again;

	case FSCACHE_COOKIE_STATE_DROPPED:
	case FSCACHE_COOKIE_STATE_RELINQUISHING:
		WARN(1, "Can't use cookie in state %u\n", state);
		break;
	}

	spin_unlock(&cookie->lock);
	if (queue)
		fscache_queue_cookie(cookie, fscache_cookie_get_use_work);
	_leave("");
}
EXPORT_SYMBOL(__fscache_use_cookie);

static void fscache_unuse_cookie_locked(struct fscache_cookie *cookie)
{
	clear_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags);
	if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags))
		return;

	cookie->unused_at = jiffies;
	spin_lock(&fscache_cookie_lru_lock);
	if (list_empty(&cookie->commit_link)) {
		fscache_get_cookie(cookie, fscache_cookie_get_lru);
		fscache_stat(&fscache_n_cookies_lru);
	}
	list_move_tail(&cookie->commit_link, &fscache_cookie_lru);

	spin_unlock(&fscache_cookie_lru_lock);
	timer_reduce(&fscache_cookie_lru_timer,
		     jiffies + fscache_lru_cookie_timeout);
}

/*
 * Stop using the cookie for I/O.
 */
void __fscache_unuse_cookie(struct fscache_cookie *cookie,
			    const void *aux_data, const loff_t *object_size)
{
	unsigned int debug_id = cookie->debug_id;
	unsigned int r = refcount_read(&cookie->ref);
	unsigned int a = atomic_read(&cookie->n_accesses);
	unsigned int c;

	if (aux_data || object_size)
		__fscache_update_cookie(cookie, aux_data, object_size);

	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	c = atomic_fetch_add_unless(&cookie->n_active, -1, 1);
	if (c != 1) {
		trace_fscache_active(debug_id, r, c - 1, a, fscache_active_unuse);
		return;
	}

	spin_lock(&cookie->lock);
	r = refcount_read(&cookie->ref);
	a = atomic_read(&cookie->n_accesses);
	c = atomic_dec_return(&cookie->n_active);
	trace_fscache_active(debug_id, r, c, a, fscache_active_unuse);
	if (c == 0)
		fscache_unuse_cookie_locked(cookie);
	spin_unlock(&cookie->lock);
}
EXPORT_SYMBOL(__fscache_unuse_cookie);
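
/*
 * Illustrative sketch (hypothetical netfs code): in-use management normally
 * goes through the fscache_use_cookie()/fscache_unuse_cookie() wrappers,
 * bracketing the period during which the file is open for caching:
 *
 *	fscache_use_cookie(inode->cookie, open_for_writing);
 *	...
 *	fscache_unuse_cookie(inode->cookie, &new_aux, &new_size);
 *
 * Passing updated auxiliary data and size at unuse time refreshes the
 * coherency information before the cookie can drift onto the LRU.
 */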

/*
 * Perform work upon the cookie, such as committing its cache state,
 * relinquishing it or withdrawing the backing cache.  We're protected from the
 * cache going away under us as object withdrawal must come through this
 * non-reentrant work item.
 */
static void fscache_cookie_state_machine(struct fscache_cookie *cookie)
{
	enum fscache_cookie_state state;
	bool wake = false;

	_enter("c=%x", cookie->debug_id);

again:
	spin_lock(&cookie->lock);
again_locked:
	state = cookie->state;
	switch (state) {
	case FSCACHE_COOKIE_STATE_QUIESCENT:
		/* The cookie is moved from the QUIESCENT state to the
		 * LOOKING_UP state by fscache_use_cookie().
		 */

		if (atomic_read(&cookie->n_accesses) == 0 &&
		    test_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags)) {
			__fscache_set_cookie_state(cookie,
						   FSCACHE_COOKIE_STATE_RELINQUISHING);
			wake = true;
			goto again_locked;
		}
		break;

	case FSCACHE_COOKIE_STATE_LOOKING_UP:
		spin_unlock(&cookie->lock);
		fscache_init_access_gate(cookie);
		fscache_perform_lookup(cookie);
		goto again;

	case FSCACHE_COOKIE_STATE_INVALIDATING:
		spin_unlock(&cookie->lock);
		fscache_perform_invalidation(cookie);
		goto again;

	case FSCACHE_COOKIE_STATE_ACTIVE:
		if (test_and_clear_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags)) {
			spin_unlock(&cookie->lock);
			fscache_prepare_to_write(cookie);
			spin_lock(&cookie->lock);
		}
		if (test_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags)) {
			if (atomic_read(&cookie->n_accesses) != 0)
				/* still being accessed: postpone it */
				break;
			__fscache_set_cookie_state(cookie,
						   FSCACHE_COOKIE_STATE_LRU_DISCARDING);
			wake = true;
			goto again_locked;
		}
		fallthrough;

	case FSCACHE_COOKIE_STATE_FAILED:
		if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
			fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);

		if (atomic_read(&cookie->n_accesses) != 0)
			break;
		if (test_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags)) {
			__fscache_set_cookie_state(cookie,
						   FSCACHE_COOKIE_STATE_RELINQUISHING);
			wake = true;
			goto again_locked;
		}
		if (test_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags)) {
			__fscache_set_cookie_state(cookie,
						   FSCACHE_COOKIE_STATE_WITHDRAWING);
			wake = true;
			goto again_locked;
		}
		break;

	case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
	case FSCACHE_COOKIE_STATE_RELINQUISHING:
	case FSCACHE_COOKIE_STATE_WITHDRAWING:
		if (cookie->cache_priv) {
			spin_unlock(&cookie->lock);
			cookie->volume->cache->ops->withdraw_cookie(cookie);
			spin_lock(&cookie->lock);
		}

		if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
			fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);

		switch (state) {
		case FSCACHE_COOKIE_STATE_RELINQUISHING:
			fscache_see_cookie(cookie, fscache_cookie_see_relinquish);
			fscache_unhash_cookie(cookie);
			__fscache_set_cookie_state(cookie,
						   FSCACHE_COOKIE_STATE_DROPPED);
			wake = true;
			goto out;
		case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
			fscache_see_cookie(cookie, fscache_cookie_see_lru_discard);
			break;
		case FSCACHE_COOKIE_STATE_WITHDRAWING:
			fscache_see_cookie(cookie, fscache_cookie_see_withdraw);
			break;
		default:
			BUG();
		}

		clear_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
		set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
		__fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
		wake = true;
		goto again_locked;

	case FSCACHE_COOKIE_STATE_DROPPED:
		break;

	default:
		WARN_ONCE(1, "Cookie %x in unexpected state %u\n",
			  cookie->debug_id, state);
		break;
	}

out:
	spin_unlock(&cookie->lock);
	if (wake)
		wake_up_cookie_state(cookie);
	_leave("");
}

static void fscache_cookie_worker(struct work_struct *work)
{
	struct fscache_cookie *cookie = container_of(work, struct fscache_cookie, work);

	fscache_see_cookie(cookie, fscache_cookie_see_work);
	fscache_cookie_state_machine(cookie);
	fscache_put_cookie(cookie, fscache_cookie_put_work);
}
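
/*
 * Of the terminal transitions in the state machine above, RELINQUISHING ends
 * in DROPPED (the cookie is unhashed and freed once the last reference is
 * put), whereas LRU_DISCARDING and WITHDRAWING return the cookie to QUIESCENT
 * so that it can be looked up again if the netfs keeps using it.
 */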

/*
 * Wait for the object to become inactive.  The cookie's work item will be
 * scheduled when someone transitions n_accesses to 0 - but if someone's
 * already done that, schedule it anyway.
 */
static void __fscache_withdraw_cookie(struct fscache_cookie *cookie)
{
	int n_accesses;
	bool unpinned;

	unpinned = test_and_clear_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags);

	/* Need to read the access count after unpinning */
	n_accesses = atomic_read(&cookie->n_accesses);
	if (unpinned)
		trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
				     n_accesses, fscache_access_cache_unpin);
	if (n_accesses == 0)
		fscache_queue_cookie(cookie, fscache_cookie_get_end_access);
}

static void fscache_cookie_lru_do_one(struct fscache_cookie *cookie)
{
	fscache_see_cookie(cookie, fscache_cookie_see_lru_do_one);

	spin_lock(&cookie->lock);
	if (cookie->state != FSCACHE_COOKIE_STATE_ACTIVE ||
	    time_before(jiffies, cookie->unused_at + fscache_lru_cookie_timeout) ||
	    atomic_read(&cookie->n_active) > 0) {
		spin_unlock(&cookie->lock);
		fscache_stat(&fscache_n_cookies_lru_removed);
	} else {
		set_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags);
		spin_unlock(&cookie->lock);
		fscache_stat(&fscache_n_cookies_lru_expired);
		_debug("lru c=%x", cookie->debug_id);
		__fscache_withdraw_cookie(cookie);
	}

	fscache_put_cookie(cookie, fscache_cookie_put_lru);
}

static void fscache_cookie_lru_worker(struct work_struct *work)
{
	struct fscache_cookie *cookie;
	unsigned long unused_at;

	spin_lock(&fscache_cookie_lru_lock);

	while (!list_empty(&fscache_cookie_lru)) {
		cookie = list_first_entry(&fscache_cookie_lru,
					  struct fscache_cookie, commit_link);
		unused_at = cookie->unused_at + fscache_lru_cookie_timeout;
		if (time_before(jiffies, unused_at)) {
			timer_reduce(&fscache_cookie_lru_timer, unused_at);
			break;
		}

		list_del_init(&cookie->commit_link);
		fscache_stat_d(&fscache_n_cookies_lru);
		spin_unlock(&fscache_cookie_lru_lock);
		fscache_cookie_lru_do_one(cookie);
		spin_lock(&fscache_cookie_lru_lock);
	}

	spin_unlock(&fscache_cookie_lru_lock);
}

static void fscache_cookie_lru_timed_out(struct timer_list *timer)
{
	queue_work(fscache_wq, &fscache_cookie_lru_work);
}

static void fscache_cookie_drop_from_lru(struct fscache_cookie *cookie)
{
	bool need_put = false;

	if (!list_empty(&cookie->commit_link)) {
		spin_lock(&fscache_cookie_lru_lock);
		if (!list_empty(&cookie->commit_link)) {
			list_del_init(&cookie->commit_link);
			fscache_stat_d(&fscache_n_cookies_lru);
			fscache_stat(&fscache_n_cookies_lru_dropped);
			need_put = true;
		}
		spin_unlock(&fscache_cookie_lru_lock);
		if (need_put)
			fscache_put_cookie(cookie, fscache_cookie_put_lru);
	}
}
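
/*
 * The cookie LRU above is time-based rather than pressure-based: an unused
 * cookie sits on fscache_cookie_lru for fscache_lru_cookie_timeout (10s)
 * before the worker considers discarding it.  If the cookie has been used
 * again in the meantime, fscache_cookie_lru_do_one() skips it, and
 * __fscache_use_cookie() clears any DO_LRU_DISCARD flag that was already set.
 */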

/*
 * Remove a cookie from the hash table.
 */
static void fscache_unhash_cookie(struct fscache_cookie *cookie)
{
	struct hlist_bl_head *h;
	unsigned int bucket;

	bucket = cookie->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1);
	h = &fscache_cookie_hash[bucket];

	hlist_bl_lock(h);
	hlist_bl_del(&cookie->hash_link);
	clear_bit(FSCACHE_COOKIE_IS_HASHED, &cookie->flags);
	hlist_bl_unlock(h);
	fscache_stat(&fscache_n_relinquishes_dropped);
}

static void fscache_drop_withdraw_cookie(struct fscache_cookie *cookie)
{
	fscache_cookie_drop_from_lru(cookie);
	__fscache_withdraw_cookie(cookie);
}

/**
 * fscache_withdraw_cookie - Mark a cookie for withdrawal
 * @cookie: The cookie to be withdrawn.
 *
 * Allow the cache backend to withdraw the backing for a cookie for its own
 * reasons, even if that cookie is in active use.
 */
void fscache_withdraw_cookie(struct fscache_cookie *cookie)
{
	set_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags);
	fscache_drop_withdraw_cookie(cookie);
}
EXPORT_SYMBOL(fscache_withdraw_cookie);

/*
 * Allow the netfs to release a cookie back to the cache.
 * - the object will be marked as recyclable on disk if retire is true
 */
void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
{
	fscache_stat(&fscache_n_relinquishes);
	if (retire)
		fscache_stat(&fscache_n_relinquishes_retire);

	_enter("c=%08x{%d},%d",
	       cookie->debug_id, atomic_read(&cookie->n_active), retire);

	if (WARN(test_and_set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
		 "Cookie c=%x already relinquished\n", cookie->debug_id))
		return;

	if (retire)
		set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
	trace_fscache_relinquish(cookie, retire);

	ASSERTCMP(atomic_read(&cookie->n_active), ==, 0);
	ASSERTCMP(atomic_read(&cookie->volume->n_cookies), >, 0);
	atomic_dec(&cookie->volume->n_cookies);

	if (test_bit(FSCACHE_COOKIE_HAS_BEEN_CACHED, &cookie->flags)) {
		set_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags);
		fscache_drop_withdraw_cookie(cookie);
	} else {
		fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_DROPPED);
		fscache_unhash_cookie(cookie);
	}
	fscache_put_cookie(cookie, fscache_cookie_put_relinquish);
}
EXPORT_SYMBOL(__fscache_relinquish_cookie);

/*
 * Drop a reference to a cookie.
 */
void fscache_put_cookie(struct fscache_cookie *cookie,
			enum fscache_cookie_trace where)
{
	struct fscache_volume *volume = cookie->volume;
	unsigned int cookie_debug_id = cookie->debug_id;
	bool zero;
	int ref;

	zero = __refcount_dec_and_test(&cookie->ref, &ref);
	trace_fscache_cookie(cookie_debug_id, ref - 1, where);
	if (zero) {
		fscache_free_cookie(cookie);
		fscache_put_volume(volume, fscache_volume_put_cookie);
	}
}
EXPORT_SYMBOL(fscache_put_cookie);

/*
 * Get a reference to a cookie.
 */
struct fscache_cookie *fscache_get_cookie(struct fscache_cookie *cookie,
					  enum fscache_cookie_trace where)
{
	int ref;

	__refcount_inc(&cookie->ref, &ref);
	trace_fscache_cookie(cookie->debug_id, ref + 1, where);
	return cookie;
}
EXPORT_SYMBOL(fscache_get_cookie);

/*
 * Ask the cache to effect invalidation of a cookie.
 */
static void fscache_perform_invalidation(struct fscache_cookie *cookie)
{
	if (!cookie->volume->cache->ops->invalidate_cookie(cookie))
		fscache_caching_failed(cookie);
	fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);
}

/*
 * Invalidate an object.
 */
void __fscache_invalidate(struct fscache_cookie *cookie,
			  const void *aux_data, loff_t new_size,
			  unsigned int flags)
{
	bool is_caching;

	_enter("c=%x", cookie->debug_id);

	fscache_stat(&fscache_n_invalidates);

	if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
		 "Trying to invalidate relinquished cookie\n"))
		return;

	if ((flags & FSCACHE_INVAL_DIO_WRITE) &&
	    test_and_set_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags))
		return;

	spin_lock(&cookie->lock);
	set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	fscache_update_aux(cookie, aux_data, &new_size);
	cookie->inval_counter++;
	trace_fscache_invalidate(cookie, new_size);

	switch (cookie->state) {
	case FSCACHE_COOKIE_STATE_INVALIDATING: /* is_still_valid will catch it */
	default:
		spin_unlock(&cookie->lock);
		_leave(" [no %u]", cookie->state);
		return;

	case FSCACHE_COOKIE_STATE_LOOKING_UP:
		if (!test_and_set_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
			__fscache_begin_cookie_access(cookie, fscache_access_invalidate_cookie);
		fallthrough;
	case FSCACHE_COOKIE_STATE_CREATING:
		spin_unlock(&cookie->lock);
		_leave(" [look %x]", cookie->inval_counter);
		return;

	case FSCACHE_COOKIE_STATE_ACTIVE:
		is_caching = fscache_begin_cookie_access(
			cookie, fscache_access_invalidate_cookie);
		if (is_caching)
			__fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_INVALIDATING);
		spin_unlock(&cookie->lock);
		wake_up_cookie_state(cookie);

		if (is_caching)
			fscache_queue_cookie(cookie, fscache_cookie_get_inval_work);
		_leave(" [inv]");
		return;
	}
}
EXPORT_SYMBOL(__fscache_invalidate);
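
/*
 * Illustrative sketch (hypothetical netfs code): invalidation is normally
 * requested through the fscache_invalidate() wrapper when the netfs learns
 * that the server's copy of an object has changed under it:
 *
 *	fscache_invalidate(inode->cookie, &new_aux, new_size, 0);
 *
 * or with FSCACHE_INVAL_DIO_WRITE as the flags argument when a direct-I/O
 * write means the file should no longer be cached.
 */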

#ifdef CONFIG_PROC_FS
/*
 * Generate a list of extant cookies in /proc/fs/fscache/cookies
 */
static int fscache_cookies_seq_show(struct seq_file *m, void *v)
{
	struct fscache_cookie *cookie;
	unsigned int keylen = 0, auxlen = 0;
	u8 *p;

	if (v == &fscache_cookies) {
		seq_puts(m,
			 "COOKIE   VOLUME   REF ACT ACC S FL DEF             \n"
			 "======== ======== === === === = == ================\n"
			 );
		return 0;
	}

	cookie = list_entry(v, struct fscache_cookie, proc_link);

	seq_printf(m,
		   "%08x %08x %3d %3d %3d %c %02lx",
		   cookie->debug_id,
		   cookie->volume->debug_id,
		   refcount_read(&cookie->ref),
		   atomic_read(&cookie->n_active),
		   atomic_read(&cookie->n_accesses),
		   fscache_cookie_states[cookie->state],
		   cookie->flags);

	keylen = cookie->key_len;
	auxlen = cookie->aux_len;

	if (keylen > 0 || auxlen > 0) {
		seq_puts(m, " ");
		p = keylen <= sizeof(cookie->inline_key) ?
			cookie->inline_key : cookie->key;
		for (; keylen > 0; keylen--)
			seq_printf(m, "%02x", *p++);
		if (auxlen > 0) {
			seq_puts(m, ", ");
			p = auxlen <= sizeof(cookie->inline_aux) ?
				cookie->inline_aux : cookie->aux;
			for (; auxlen > 0; auxlen--)
				seq_printf(m, "%02x", *p++);
		}
	}

	seq_puts(m, "\n");
	return 0;
}

static void *fscache_cookies_seq_start(struct seq_file *m, loff_t *_pos)
	__acquires(fscache_cookies_lock)
{
	read_lock(&fscache_cookies_lock);
	return seq_list_start_head(&fscache_cookies, *_pos);
}

static void *fscache_cookies_seq_next(struct seq_file *m, void *v, loff_t *_pos)
{
	return seq_list_next(v, &fscache_cookies, _pos);
}

static void fscache_cookies_seq_stop(struct seq_file *m, void *v)
	__releases(fscache_cookies_lock)
{
	read_unlock(&fscache_cookies_lock);
}

const struct seq_operations fscache_cookies_seq_ops = {
	.start	= fscache_cookies_seq_start,
	.next	= fscache_cookies_seq_next,
	.stop	= fscache_cookies_seq_stop,
	.show	= fscache_cookies_seq_show,
};
#endif