// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt

#include <crypto/skcipher.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/scatterlist.h>

#include "blk-cgroup.h"
#include "blk-crypto-internal.h"

static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
		 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
		 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
		 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");

struct bio_fallback_crypt_ctx {
	struct bio_crypt_ctx crypt_ctx;
	/*
	 * Copy of the bvec_iter when this bio was submitted.
	 * We only want to en/decrypt the part of the bio as described by the
	 * bvec_iter upon submission because bio might be split before being
	 * resubmitted.
	 */
	struct bvec_iter crypt_iter;
	union {
		struct {
			struct work_struct work;
			struct bio *bio;
		};
		struct {
			void *bi_private_orig;
			bio_end_io_t *bi_end_io_orig;
		};
	};
};

static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;

/*
 * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
 * all of a mode's tfms when that mode starts being used. Since each mode may
 * need all the keyslots at some point, each mode needs its own tfm for each
 * keyslot; thus, a keyslot may contain tfms for multiple modes. However, to
 * match the behavior of real inline encryption hardware (which only supports a
 * single encryption context per keyslot), we only allow one tfm per keyslot to
 * be used at a time - the rest of the unused tfms have their keys cleared.
 */
static DEFINE_MUTEX(tfms_init_lock);
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

static struct blk_crypto_fallback_keyslot {
	enum blk_crypto_mode_num crypto_mode;
	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

static struct blk_crypto_profile *blk_crypto_fallback_profile;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
static struct bio_set crypto_bio_split;

/*
 * This is the key we set when evicting a keyslot. This *should* be the all 0's
 * key, but AES-XTS rejects that key, so we use some random bytes instead.
 */
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];
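
/*
 * "Evict" a keyslot by programming blank_key into its currently used tfm and
 * marking the slot's mode invalid, so the slot can later be reprogrammed with
 * a different key.
 */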
static void blk_crypto_fallback_evict_keyslot(unsigned int slot)
{
	struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
	enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
	int err;

	WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

	/* Clear the key in the skcipher */
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
				     blk_crypto_modes[crypto_mode].keysize);
	WARN_ON(err);
	slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}

static int
blk_crypto_fallback_keyslot_program(struct blk_crypto_profile *profile,
				    const struct blk_crypto_key *key,
				    unsigned int slot)
{
	struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
	const enum blk_crypto_mode_num crypto_mode =
		key->crypto_cfg.crypto_mode;
	int err;

	if (crypto_mode != slotp->crypto_mode &&
	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
		blk_crypto_fallback_evict_keyslot(slot);

	slotp->crypto_mode = crypto_mode;
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
				     key->size);
	if (err) {
		blk_crypto_fallback_evict_keyslot(slot);
		return err;
	}
	return 0;
}

static int blk_crypto_fallback_keyslot_evict(struct blk_crypto_profile *profile,
					     const struct blk_crypto_key *key,
					     unsigned int slot)
{
	blk_crypto_fallback_evict_keyslot(slot);
	return 0;
}

static const struct blk_crypto_ll_ops blk_crypto_fallback_ll_ops = {
	.keyslot_program = blk_crypto_fallback_keyslot_program,
	.keyslot_evict = blk_crypto_fallback_keyslot_evict,
};

static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
{
	struct bio *src_bio = enc_bio->bi_private;
	int i;

	for (i = 0; i < enc_bio->bi_vcnt; i++)
		mempool_free(enc_bio->bi_io_vec[i].bv_page,
			     blk_crypto_bounce_page_pool);

	src_bio->bi_status = enc_bio->bi_status;

	bio_uninit(enc_bio);
	kfree(enc_bio);
	bio_endio(src_bio);
}

static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
{
	unsigned int nr_segs = bio_segments(bio_src);
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	bio = bio_kmalloc(nr_segs, GFP_NOIO);
	if (!bio)
		return NULL;
	bio_init(bio, bio_src->bi_bdev, bio->bi_inline_vecs, nr_segs,
		 bio_src->bi_opf);
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

	bio_for_each_segment(bv, bio_src, iter)
		bio->bi_io_vec[bio->bi_vcnt++] = bv;

	bio_clone_blkg_association(bio, bio_src);

	return bio;
}

static bool
blk_crypto_fallback_alloc_cipher_req(struct blk_crypto_keyslot *slot,
				     struct skcipher_request **ciph_req_ret,
				     struct crypto_wait *wait)
{
	struct skcipher_request *ciph_req;
	const struct blk_crypto_fallback_keyslot *slotp;
	int keyslot_idx = blk_crypto_keyslot_index(slot);

	slotp = &blk_crypto_keyslots[keyslot_idx];
	ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
					  GFP_NOIO);
	if (!ciph_req)
		return false;

	skcipher_request_set_callback(ciph_req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, wait);
	*ciph_req_ret = ciph_req;

	return true;
}
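
/*
 * The bounce bio built for encryption carries one bounce page per segment and
 * can hold at most BIO_MAX_VECS segments, so a larger WRITE bio is split here:
 * the front part becomes the bio we encrypt, and the remainder is chained and
 * resubmitted.
 */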
static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	unsigned int i = 0;
	unsigned int num_sectors = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		num_sectors += bv.bv_len >> SECTOR_SHIFT;
		if (++i == BIO_MAX_VECS)
			break;
	}
	if (num_sectors < bio_sectors(bio)) {
		struct bio *split_bio;

		split_bio = bio_split(bio, num_sectors, GFP_NOIO,
				      &crypto_bio_split);
		if (!split_bio) {
			bio->bi_status = BLK_STS_RESOURCE;
			return false;
		}
		bio_chain(split_bio, bio);
		submit_bio_noacct(bio);
		*bio_ptr = split_bio;
	}

	return true;
}

union blk_crypto_iv {
	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};

static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
				 union blk_crypto_iv *iv)
{
	int i;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
		iv->dun[i] = cpu_to_le64(dun[i]);
}
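
/*
 * The IV for each data unit is simply the current DUN encoded as little-endian
 * bytes. The en/decryption loops below advance the DUN by one per data unit,
 * so consecutive data units get consecutive IVs.
 */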
/*
 * The crypto API fallback's encryption routine.
 * Allocate a bounce bio for encryption, encrypt the input bio using crypto API,
 * and replace *bio_ptr with the bounce bio. May split input bio if it's too
 * large. Returns true on success. Returns false and sets bio->bi_status on
 * error.
 */
static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
{
	struct bio *src_bio, *enc_bio;
	struct bio_crypt_ctx *bc;
	struct blk_crypto_keyslot *slot;
	int data_unit_size;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	struct scatterlist src, dst;
	union blk_crypto_iv iv;
	unsigned int i, j;
	bool ret = false;
	blk_status_t blk_st;

	/* Split the bio if it's too big for single page bvec */
	if (!blk_crypto_fallback_split_bio_if_needed(bio_ptr))
		return false;

	src_bio = *bio_ptr;
	bc = src_bio->bi_crypt_context;
	data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;

	/* Allocate bounce bio for encryption */
	enc_bio = blk_crypto_fallback_clone_bio(src_bio);
	if (!enc_bio) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		return false;
	}

	/*
	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
	 * this bio's algorithm and key.
	 */
	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
					bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		src_bio->bi_status = blk_st;
		goto out_put_enc_bio;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		goto out_release_keyslot;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&src, 1);
	sg_init_table(&dst, 1);

	skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
				   iv.bytes);

	/* Encrypt each page in the bounce bio */
	for (i = 0; i < enc_bio->bi_vcnt; i++) {
		struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
		struct page *plaintext_page = enc_bvec->bv_page;
		struct page *ciphertext_page =
			mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);

		enc_bvec->bv_page = ciphertext_page;

		if (!ciphertext_page) {
			src_bio->bi_status = BLK_STS_RESOURCE;
			goto out_free_bounce_pages;
		}

		sg_set_page(&src, plaintext_page, data_unit_size,
			    enc_bvec->bv_offset);
		sg_set_page(&dst, ciphertext_page, data_unit_size,
			    enc_bvec->bv_offset);

		/* Encrypt each data unit in this page */
		for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
					    &wait)) {
				i++;
				src_bio->bi_status = BLK_STS_IOERR;
				goto out_free_bounce_pages;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			src.offset += data_unit_size;
			dst.offset += data_unit_size;
		}
	}

	enc_bio->bi_private = src_bio;
	enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
	*bio_ptr = enc_bio;
	ret = true;

	enc_bio = NULL;
	goto out_free_ciph_req;

out_free_bounce_pages:
	while (i > 0)
		mempool_free(enc_bio->bi_io_vec[--i].bv_page,
			     blk_crypto_bounce_page_pool);
out_free_ciph_req:
	skcipher_request_free(ciph_req);
out_release_keyslot:
	blk_crypto_put_keyslot(slot);
out_put_enc_bio:
	if (enc_bio)
		bio_uninit(enc_bio);
	kfree(enc_bio);
	return ret;
}
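
/*
 * The read path works the other way around: ciphertext is read directly into
 * the caller's pages and decrypted in place after the I/O completes.
 * blk_crypto_fallback_decrypt_endio() can't do the decryption itself because
 * bios complete in atomic context and skcipher operations may sleep, so it
 * defers to the workqueue function below.
 */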
/*
 * The crypto API fallback's main decryption routine.
 * Decrypts input bio in place, and calls bio_endio on the bio.
 */
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
{
	struct bio_fallback_crypt_ctx *f_ctx =
		container_of(work, struct bio_fallback_crypt_ctx, work);
	struct bio *bio = f_ctx->bio;
	struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
	struct blk_crypto_keyslot *slot;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	union blk_crypto_iv iv;
	struct scatterlist sg;
	struct bio_vec bv;
	struct bvec_iter iter;
	const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
	unsigned int i;
	blk_status_t blk_st;

	/*
	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
	 * this bio's algorithm and key.
	 */
	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
					bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		bio->bi_status = blk_st;
		goto out_no_keyslot;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
		bio->bi_status = BLK_STS_RESOURCE;
		goto out;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&sg, 1);
	skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
				   iv.bytes);

	/* Decrypt each segment in the bio */
	__bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
		struct page *page = bv.bv_page;

		sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

		/* Decrypt each data unit in the segment */
		for (i = 0; i < bv.bv_len; i += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
					    &wait)) {
				bio->bi_status = BLK_STS_IOERR;
				goto out;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			sg.offset += data_unit_size;
		}
	}

out:
	skcipher_request_free(ciph_req);
	blk_crypto_put_keyslot(slot);
out_no_keyslot:
	mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
	bio_endio(bio);
}

/**
 * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
 *
 * @bio: the bio to queue
 *
 * Restore bi_private and bi_end_io, and queue the bio for decryption into a
 * workqueue, since this function will be called from an atomic context.
 */
static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
{
	struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;

	bio->bi_private = f_ctx->bi_private_orig;
	bio->bi_end_io = f_ctx->bi_end_io_orig;

	/* If there was an IO error, don't queue for decrypt. */
	if (bio->bi_status) {
		mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
		bio_endio(bio);
		return;
	}

	INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
	f_ctx->bio = bio;
	queue_work(blk_crypto_wq, &f_ctx->work);
}
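
/*
 * blk_crypto_fallback_bio_prep() below is the fallback's entry point on the
 * submission path; the blk-crypto core calls it for bios whose encryption
 * context can't be handled natively by the underlying device.
 */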
/**
 * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
 *
 * @bio_ptr: pointer to the bio to prepare
 *
 * If bio is doing a WRITE operation, this splits the bio into two parts if it's
 * too big (see blk_crypto_fallback_split_bio_if_needed()). It then allocates a
 * bounce bio for the first part, encrypts it, and updates bio_ptr to point to
 * the bounce bio.
 *
 * For a READ operation, we mark the bio for decryption by using bi_private and
 * bi_end_io.
 *
 * In either case, this function will make the bio look like a regular bio (i.e.
 * as if no encryption context was ever specified) for the purposes of the rest
 * of the stack except for blk-integrity (blk-integrity and blk-crypto are not
 * currently supported together).
 *
 * Return: true on success. Sets bio->bi_status and returns false on error.
 */
bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	struct bio_fallback_crypt_ctx *f_ctx;

	if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
		/* User didn't call blk_crypto_start_using_key() first */
		bio->bi_status = BLK_STS_IOERR;
		return false;
	}

	if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile,
					&bc->bc_key->crypto_cfg)) {
		bio->bi_status = BLK_STS_NOTSUPP;
		return false;
	}

	if (bio_data_dir(bio) == WRITE)
		return blk_crypto_fallback_encrypt_bio(bio_ptr);

	/*
	 * bio READ case: Set up a f_ctx in the bio's bi_private and set the
	 * bi_end_io appropriately to trigger decryption when the bio is ended.
	 */
	f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
	f_ctx->crypt_ctx = *bc;
	f_ctx->crypt_iter = bio->bi_iter;
	f_ctx->bi_private_orig = bio->bi_private;
	f_ctx->bi_end_io_orig = bio->bi_end_io;
	bio->bi_private = (void *)f_ctx;
	bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
	bio_crypt_free_ctx(bio);

	return true;
}

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return __blk_crypto_evict_key(blk_crypto_fallback_profile, key);
}

static bool blk_crypto_fallback_inited;
static int blk_crypto_fallback_init(void)
{
	int i;
	int err;

	if (blk_crypto_fallback_inited)
		return 0;

	get_random_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);

	err = bioset_init(&crypto_bio_split, 64, 0, 0);
	if (err)
		goto out;

	/* Dynamic allocation is needed because of lockdep_register_key(). */
	blk_crypto_fallback_profile =
		kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
	if (!blk_crypto_fallback_profile) {
		err = -ENOMEM;
		goto fail_free_bioset;
	}

	err = blk_crypto_profile_init(blk_crypto_fallback_profile,
				      blk_crypto_num_keyslots);
	if (err)
		goto fail_free_profile;
	err = -ENOMEM;

	blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
	blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;

	/* All blk-crypto modes have a crypto API fallback. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
		blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF;
	blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
					WQ_UNBOUND | WQ_HIGHPRI |
					WQ_MEM_RECLAIM, num_online_cpus());
	if (!blk_crypto_wq)
		goto fail_destroy_profile;

	blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
				      sizeof(blk_crypto_keyslots[0]),
				      GFP_KERNEL);
	if (!blk_crypto_keyslots)
		goto fail_free_wq;

	blk_crypto_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_bounce_pg, 0);
	if (!blk_crypto_bounce_page_pool)
		goto fail_free_keyslots;

	bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
	if (!bio_fallback_crypt_ctx_cache)
		goto fail_free_bounce_page_pool;

	bio_fallback_crypt_ctx_pool =
		mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
					 bio_fallback_crypt_ctx_cache);
	if (!bio_fallback_crypt_ctx_pool)
		goto fail_free_crypt_ctx_cache;

	blk_crypto_fallback_inited = true;

	return 0;
fail_free_crypt_ctx_cache:
	kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
fail_free_bounce_page_pool:
	mempool_destroy(blk_crypto_bounce_page_pool);
fail_free_keyslots:
	kfree(blk_crypto_keyslots);
fail_free_wq:
	destroy_workqueue(blk_crypto_wq);
fail_destroy_profile:
	blk_crypto_profile_destroy(blk_crypto_fallback_profile);
fail_free_profile:
	kfree(blk_crypto_fallback_profile);
fail_free_bioset:
	bioset_exit(&crypto_bio_split);
out:
	return err;
}
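
/*
 * All of the above is set up lazily: blk_crypto_fallback_init() is called only
 * from blk_crypto_fallback_start_using_mode() below, so nothing is allocated
 * unless the fallback is actually needed.
 */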
/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
	struct blk_crypto_fallback_keyslot *slotp;
	unsigned int i;
	int err = 0;

	/*
	 * Fast path
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we try to access them.
	 */
	if (likely(smp_load_acquire(&tfms_inited[mode_num])))
		return 0;

	mutex_lock(&tfms_init_lock);
	if (tfms_inited[mode_num])
		goto out;

	err = blk_crypto_fallback_init();
	if (err)
		goto out;

	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
		if (IS_ERR(slotp->tfms[mode_num])) {
			err = PTR_ERR(slotp->tfms[mode_num]);
			if (err == -ENOENT) {
				pr_warn_once("Missing crypto API support for \"%s\"\n",
					     cipher_str);
				err = -ENOPKG;
			}
			slotp->tfms[mode_num] = NULL;
			goto out_free_tfms;
		}

		crypto_skcipher_set_flags(slotp->tfms[mode_num],
					  CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
	}

	/*
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we set tfms_inited[mode_num].
	 */
	smp_store_release(&tfms_inited[mode_num], true);
	goto out;

out_free_tfms:
	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		crypto_free_skcipher(slotp->tfms[mode_num]);
		slotp->tfms[mode_num] = NULL;
	}
out:
	mutex_unlock(&tfms_init_lock);
	return err;
}