// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 *	Mimi Zohar <zohar@us.ibm.com>
 *	Kylene Hall <kjhall@us.ibm.com>
 *
 * File: ima_crypto.c
 *	Calculates md5/sha1 file hash, template hash, boot-aggregate hash
 */

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/ratelimit.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

#include "ima.h"

/* minimum file size for ahash use */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");

/* default is 0 - 1 page. */
static int ima_maxorder;
static unsigned int ima_bufsize = PAGE_SIZE;

static int param_set_bufsize(const char *val, const struct kernel_param *kp)
{
	unsigned long long size;
	int order;

	size = memparse(val, NULL);
	order = get_order(size);
	if (order > MAX_PAGE_ORDER)
		return -EINVAL;
	ima_maxorder = order;
	ima_bufsize = PAGE_SIZE << order;
	return 0;
}

static const struct kernel_param_ops param_ops_bufsize = {
	.set = param_set_bufsize,
	.get = param_get_uint,
};
#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)

module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");

static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;

int ima_sha1_idx __ro_after_init;
int ima_hash_algo_idx __ro_after_init;
/*
 * Additional number of slots reserved, as needed, for SHA1
 * and IMA default algo.
 */
int ima_extra_slots __ro_after_init;

struct ima_algo_desc *ima_algo_array __ro_after_init;

static int __init ima_init_ima_crypto(void)
{
	long rc;

	ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
	if (IS_ERR(ima_shash_tfm)) {
		rc = PTR_ERR(ima_shash_tfm);
		pr_err("Can not allocate %s (reason: %ld)\n",
		       hash_algo_name[ima_hash_algo], rc);
		return rc;
	}
	pr_info("Allocated hash algorithm: %s\n",
		hash_algo_name[ima_hash_algo]);
	return 0;
}
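
/*
 * Illustrative sketch, not part of the original file: how the
 * "ahash_bufsize" parameter is rounded. param_set_bufsize() above rounds
 * the requested size up to a power-of-two number of pages, so with 4 KiB
 * pages a setting of ima.ahash_bufsize=5000 yields an 8 KiB buffer.
 * The helper name below is hypothetical.
 */
static int __maybe_unused ima_bufsize_rounding_example(void)
{
	/* memparse() accepts plain bytes or k/M/G suffixes, e.g. "5000" or "8k" */
	unsigned long long size = memparse("5000", NULL);
	int order = get_order(size);	/* 1, i.e. two 4 KiB pages */

	return PAGE_SIZE << order;	/* 8192 */
}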

static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
{
	struct crypto_shash *tfm = ima_shash_tfm;
	int rc, i;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo == ima_hash_algo)
		return tfm;

	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
		if (ima_algo_array[i].tfm && ima_algo_array[i].algo == algo)
			return ima_algo_array[i].tfm;

	tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
	if (IS_ERR(tfm)) {
		rc = PTR_ERR(tfm);
		pr_err("Can not allocate %s (reason: %d)\n",
		       hash_algo_name[algo], rc);
	}
	return tfm;
}

int __init ima_init_crypto(void)
{
	enum hash_algo algo;
	long rc;
	int i;

	rc = ima_init_ima_crypto();
	if (rc)
		return rc;

	ima_sha1_idx = -1;
	ima_hash_algo_idx = -1;

	for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
		algo = ima_tpm_chip->allocated_banks[i].crypto_id;
		if (algo == HASH_ALGO_SHA1)
			ima_sha1_idx = i;

		if (algo == ima_hash_algo)
			ima_hash_algo_idx = i;
	}

	if (ima_sha1_idx < 0) {
		ima_sha1_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;
		if (ima_hash_algo == HASH_ALGO_SHA1)
			ima_hash_algo_idx = ima_sha1_idx;
	}

	if (ima_hash_algo_idx < 0)
		ima_hash_algo_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;

	ima_algo_array = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots,
				 sizeof(*ima_algo_array), GFP_KERNEL);
	if (!ima_algo_array) {
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
		algo = ima_tpm_chip->allocated_banks[i].crypto_id;
		ima_algo_array[i].algo = algo;

		/* unknown TPM algorithm */
		if (algo == HASH_ALGO__LAST)
			continue;

		if (algo == ima_hash_algo) {
			ima_algo_array[i].tfm = ima_shash_tfm;
			continue;
		}

		ima_algo_array[i].tfm = ima_alloc_tfm(algo);
		if (IS_ERR(ima_algo_array[i].tfm)) {
			if (algo == HASH_ALGO_SHA1) {
				rc = PTR_ERR(ima_algo_array[i].tfm);
				ima_algo_array[i].tfm = NULL;
				goto out_array;
			}

			ima_algo_array[i].tfm = NULL;
		}
	}

	if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip)) {
		if (ima_hash_algo == HASH_ALGO_SHA1) {
			ima_algo_array[ima_sha1_idx].tfm = ima_shash_tfm;
		} else {
			ima_algo_array[ima_sha1_idx].tfm =
						ima_alloc_tfm(HASH_ALGO_SHA1);
			if (IS_ERR(ima_algo_array[ima_sha1_idx].tfm)) {
				rc = PTR_ERR(ima_algo_array[ima_sha1_idx].tfm);
				goto out_array;
			}
		}

		ima_algo_array[ima_sha1_idx].algo = HASH_ALGO_SHA1;
	}

	if (ima_hash_algo_idx >= NR_BANKS(ima_tpm_chip) &&
	    ima_hash_algo_idx != ima_sha1_idx) {
		ima_algo_array[ima_hash_algo_idx].tfm = ima_shash_tfm;
		ima_algo_array[ima_hash_algo_idx].algo = ima_hash_algo;
	}

	return 0;
out_array:
	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
		if (!ima_algo_array[i].tfm ||
		    ima_algo_array[i].tfm == ima_shash_tfm)
			continue;

		crypto_free_shash(ima_algo_array[i].tfm);
	}
	kfree(ima_algo_array);
out:
	crypto_free_shash(ima_shash_tfm);
	return rc;
}
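
/*
 * Illustrative example, not part of the original file: the slot layout
 * ima_init_crypto() produces on a hypothetical TPM 2.0 chip whose only
 * allocated bank is SHA-256, with ima_hash_algo == HASH_ALGO_SHA256:
 *
 *	ima_algo_array[0]: SHA-256 (TPM bank 0) -> ima_shash_tfm
 *	ima_algo_array[1]: SHA1 (extra slot)    -> dedicated SHA1 tfm
 *
 * Here NR_BANKS(ima_tpm_chip) == 1, ima_extra_slots == 1,
 * ima_hash_algo_idx == 0 and ima_sha1_idx == 1: SHA1 gets a reserved
 * slot even without a SHA1 bank, because ima_calc_field_array_hash()
 * below always computes a SHA1 template digest for the measurement list.
 */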

static void ima_free_tfm(struct crypto_shash *tfm)
{
	int i;

	if (tfm == ima_shash_tfm)
		return;

	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
		if (ima_algo_array[i].tfm == tfm)
			return;

	crypto_free_shash(tfm);
}

/**
 * ima_alloc_pages() - Allocate contiguous pages.
 * @max_size: Maximum amount of memory to allocate.
 * @allocated_size: Returned size of actual allocation.
 * @last_warn: Should the smallest (order-zero) allocation warn or not.
 *
 * Tries to allocate memory opportunistically: first attempt to allocate
 * max_size bytes, then fall back through progressively smaller orders until
 * an order-zero (single page) allocation is reached. Allocations above order
 * zero never generate allocation warnings; the final order-zero attempt
 * warns only if last_warn is set.
 *
 * By default, ima_maxorder is 0 and the allocation is equivalent to
 * kmalloc(GFP_KERNEL).
 *
 * Return: pointer to allocated memory, or NULL on failure.
 */
static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
			     int last_warn)
{
	void *ptr;
	int order = ima_maxorder;
	gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;

	if (order)
		order = min(get_order(max_size), order);

	for (; order; order--) {
		ptr = (void *)__get_free_pages(gfp_mask, order);
		if (ptr) {
			*allocated_size = PAGE_SIZE << order;
			return ptr;
		}
	}

	/* order is zero - one page */

	gfp_mask = GFP_KERNEL;

	if (!last_warn)
		gfp_mask |= __GFP_NOWARN;

	ptr = (void *)__get_free_pages(gfp_mask, 0);
	if (ptr) {
		*allocated_size = PAGE_SIZE;
		return ptr;
	}

	*allocated_size = 0;
	return NULL;
}

/**
 * ima_free_pages() - Free pages allocated by ima_alloc_pages().
 * @ptr: Pointer to allocated pages.
 * @size: Size of allocated buffer.
 */
static void ima_free_pages(void *ptr, size_t size)
{
	if (!ptr)
		return;
	free_pages((unsigned long)ptr, get_order(size));
}
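
/*
 * Illustrative sketch, not part of the original file: the intended pairing
 * of ima_alloc_pages()/ima_free_pages(). The caller passes the file size
 * as the upper bound and receives the size actually obtained, which may
 * be smaller than requested but is at least one page on success. The
 * helper name is hypothetical.
 */
static int __maybe_unused ima_alloc_pages_example(loff_t i_size)
{
	size_t got;
	void *buf = ima_alloc_pages(i_size, &got, 1);

	if (!buf)
		return -ENOMEM;

	/* ... read and hash up to "got" bytes at a time using buf ... */

	ima_free_pages(buf, got);
	return 0;
}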

static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
{
	struct crypto_ahash *tfm = ima_ahash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo || !tfm) {
		tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
		if (!IS_ERR(tfm)) {
			if (algo == ima_hash_algo)
				ima_ahash_tfm = tfm;
		} else {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_atfm(struct crypto_ahash *tfm)
{
	if (tfm != ima_ahash_tfm)
		crypto_free_ahash(tfm);
}

static inline int ahash_wait(int err, struct crypto_wait *wait)
{
	err = crypto_wait_req(err, wait);

	if (err)
		pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);

	return err;
}

static int ima_calc_file_hash_atfm(struct file *file,
				   struct ima_digest_data *hash,
				   struct crypto_ahash *tfm)
{
	loff_t i_size, offset;
	char *rbuf[2] = { NULL, };
	int rc, rbuf_len, active = 0, ahash_rc = 0;
	struct ahash_request *req;
	struct scatterlist sg[1];
	struct crypto_wait wait;
	size_t rbuf_size[2];

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out1;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out2;

	/*
	 * Try to allocate maximum size of memory.
	 * Fail if even a single page cannot be allocated.
	 */
	rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
	if (!rbuf[0]) {
		rc = -ENOMEM;
		goto out1;
	}

	/* Only allocate one buffer if that is enough. */
	if (i_size > rbuf_size[0]) {
		/*
		 * Try to allocate secondary buffer. If that fails fallback to
		 * using single buffering. Use previous memory allocation size
		 * as baseline for possible allocation size.
		 */
		rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
					  &rbuf_size[1], 0);
	}

	for (offset = 0; offset < i_size; offset += rbuf_len) {
		if (!rbuf[1] && offset) {
			/* Not using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}
		/* read buffer */
		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
		rc = integrity_kernel_read(file, offset, rbuf[active],
					   rbuf_len);
		if (rc != rbuf_len) {
			if (rc >= 0)
				rc = -EINVAL;
			/*
			 * Forward current rc, do not overwrite with return value
			 * from ahash_wait()
			 */
			ahash_wait(ahash_rc, &wait);
			goto out3;
		}

		if (rbuf[1] && offset) {
			/* Using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}

		sg_init_one(&sg[0], rbuf[active], rbuf_len);
		ahash_request_set_crypt(req, sg, NULL, rbuf_len);

		ahash_rc = crypto_ahash_update(req);

		if (rbuf[1])
			active = !active; /* swap buffers, if we use two */
	}
	/* wait for the last update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
out3:
	ima_free_pages(rbuf[0], rbuf_size[0]);
	ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out1:
	ahash_request_free(req);
	return rc;
}
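
/*
 * Illustrative timeline, not part of the original file: with two buffers,
 * the loop in ima_calc_file_hash_atfm() overlaps disk I/O with hashing.
 * Each iteration reads the next chunk into one buffer while the crypto
 * engine may still be hashing the previous chunk from the other, and only
 * then waits before submitting the new update:
 *
 *	read(rbuf[0]) update(rbuf[0]) ........  update(rbuf[1]) ........
 *	                 read(rbuf[1])    wait     read(rbuf[0])    wait
 *
 * With a single buffer, the wait happens before every read, fully
 * serializing I/O and hashing.
 */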

static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_atfm(file, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}

static int ima_calc_file_hash_tfm(struct file *file,
				  struct ima_digest_data *hash,
				  struct crypto_shash *tfm)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0) {	/* unexpected EOF */
			rc = -EINVAL;
			break;
		}
		offset += rbuf_len;

		rc = crypto_shash_update(shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	kfree(rbuf);
out:
	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_tfm(file, hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}

/*
 * ima_calc_file_hash - calculate file hash
 *
 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 * a hash. ahash performance varies for different data sizes on different
 * crypto accelerators. shash performance might be better for smaller files.
 * The 'ima.ahash_minsize' module parameter allows specifying the best
 * minimum file size for using ahash on the system.
 *
 * If the ima.ahash_minsize parameter is not specified, this function uses
 * shash for the hash calculation. If ahash fails, it falls back to using
 * shash.
 */
int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
	loff_t i_size;
	int rc;
	struct file *f = file;
	bool new_file_instance = false;

	/*
	 * For consistency, fail files opened with the O_DIRECT flag on
	 * filesystems mounted with/without DAX option.
	 */
	if (file->f_flags & O_DIRECT) {
		hash->length = hash_digest_size[ima_hash_algo];
		hash->algo = ima_hash_algo;
		return -EINVAL;
	}

	/* Open a new file instance in O_RDONLY if we cannot read */
	if (!(file->f_mode & FMODE_READ)) {
		int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
				O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);

		flags |= O_RDONLY;
		f = dentry_open(&file->f_path, flags, file->f_cred);
		if (IS_ERR(f))
			return PTR_ERR(f);

		new_file_instance = true;
	}

	i_size = i_size_read(file_inode(f));

	if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
		rc = ima_calc_file_ahash(f, hash);
		if (!rc)
			goto out;
	}

	rc = ima_calc_file_shash(f, hash);
out:
	if (new_file_instance)
		fput(f);
	return rc;
}
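
/*
 * Illustrative sketch, not part of the original file: typical use of
 * ima_calc_file_hash(). Following the pattern used elsewhere in IMA, the
 * caller wraps struct ima_digest_data (which ends in a flexible digest
 * array) in a buffer large enough for the largest digest, sets hash->algo,
 * and lets the function fill in hash->length and hash->digest. The helper
 * name is hypothetical.
 */
static int __maybe_unused ima_hash_file_example(struct file *file)
{
	struct {
		struct ima_digest_data hdr;
		char digest[IMA_MAX_DIGEST_SIZE];
	} h;

	h.hdr.algo = ima_hash_algo;	/* or any other enum hash_algo value */

	return ima_calc_file_hash(file, &h.hdr);
}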

/*
 * Calculate the hash of template data
 */
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_entry *entry,
					 int tfm_idx)
{
	SHASH_DESC_ON_STACK(shash, ima_algo_array[tfm_idx].tfm);
	struct ima_template_desc *td = entry->template_desc;
	int num_fields = entry->template_desc->num_fields;
	int rc, i;

	shash->tfm = ima_algo_array[tfm_idx].tfm;

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;
		u32 datalen_to_hash = !ima_canonical_fmt ?
				datalen : (__force u32)cpu_to_le32(datalen);

		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			rc = crypto_shash_update(shash,
						 (const u8 *) &datalen_to_hash,
						 sizeof(datalen_to_hash));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}
		rc = crypto_shash_update(shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(shash, entry->digests[tfm_idx].digest);

	return rc;
}

int ima_calc_field_array_hash(struct ima_field_data *field_data,
			      struct ima_template_entry *entry)
{
	u16 alg_id;
	int rc, i;

	rc = ima_calc_field_array_hash_tfm(field_data, entry, ima_sha1_idx);
	if (rc)
		return rc;

	entry->digests[ima_sha1_idx].alg_id = TPM_ALG_SHA1;

	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
		if (i == ima_sha1_idx)
			continue;

		if (i < NR_BANKS(ima_tpm_chip)) {
			alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
			entry->digests[i].alg_id = alg_id;
		}

		/* for unmapped TPM algorithms digest is still a padded SHA1 */
		if (!ima_algo_array[i].tfm) {
			memcpy(entry->digests[i].digest,
			       entry->digests[ima_sha1_idx].digest,
			       TPM_DIGEST_SIZE);
			continue;
		}

		rc = ima_calc_field_array_hash_tfm(field_data, entry, i);
		if (rc)
			return rc;
	}
	return rc;
}

static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
				  struct ima_digest_data *hash,
				  struct crypto_ahash *tfm)
{
	struct ahash_request *req;
	struct scatterlist sg;
	struct crypto_wait wait;
	int rc, ahash_rc = 0;

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out;

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, NULL, len);

	ahash_rc = crypto_ahash_update(req);

	/* wait for the update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out:
	ahash_request_free(req);
	return rc;
}

static int calc_buffer_ahash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}
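
/*
 * Illustrative byte layout, not part of the original file: for any
 * template other than the legacy "ima" one (e.g. "ima-ng"), the template
 * hash computed by ima_calc_field_array_hash_tfm() covers a
 * length-prefixed concatenation of the field values:
 *
 *	u32 len(field 0) | field 0 data | u32 len(field 1) | field 1 data ...
 *
 * The length prefixes are in host byte order unless ima_canonical_fmt is
 * set, in which case they are little-endian. The legacy "ima" template
 * omits the prefixes and instead zero-pads the event name field ("n") to
 * IMA_EVENT_NAME_LEN_MAX + 1 bytes.
 */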

static int calc_buffer_shash_tfm(const void *buf, loff_t size,
				 struct ima_digest_data *hash,
				 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	unsigned int len;
	int rc;

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	while (size) {
		len = size < PAGE_SIZE ? size : PAGE_SIZE;
		rc = crypto_shash_update(shash, buf, len);
		if (rc)
			break;
		buf += len;
		size -= len;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int calc_buffer_shash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_shash_tfm(buf, len, hash, tfm);

	ima_free_tfm(tfm);
	return rc;
}

int ima_calc_buffer_hash(const void *buf, loff_t len,
			 struct ima_digest_data *hash)
{
	int rc;

	if (ima_ahash_minsize && len >= ima_ahash_minsize) {
		rc = calc_buffer_ahash(buf, len, hash);
		if (!rc)
			return 0;
	}

	return calc_buffer_shash(buf, len, hash);
}

static void ima_pcrread(u32 idx, struct tpm_digest *d)
{
	if (!ima_tpm_chip)
		return;

	if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
		pr_err("Error Communicating to TPM chip\n");
}
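
/*
 * Illustrative sketch, not part of the original file: hashing an in-memory
 * buffer (e.g. a kernel command line or a key payload) with the IMA
 * default algorithm via ima_calc_buffer_hash(), using the same wrapped
 * ima_digest_data pattern as the file-hash example above. The helper name
 * is hypothetical.
 */
static int __maybe_unused ima_hash_buffer_example(const void *buf, size_t len)
{
	struct {
		struct ima_digest_data hdr;
		char digest[IMA_MAX_DIGEST_SIZE];
	} h;

	h.hdr.algo = ima_hash_algo;

	return ima_calc_buffer_hash(buf, len, &h.hdr);
}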

/*
 * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With
 * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with
 * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks,
 * allowing firmware to configure and enable different banks.
 *
 * Knowing which TPM bank is read to calculate the boot_aggregate digest
 * needs to be conveyed to a verifier. For this reason, use the same
 * hash algorithm for reading the TPM PCRs as for calculating the boot
 * aggregate digest as stored in the measurement list.
 */
static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
				       struct crypto_shash *tfm)
{
	struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
	int rc;
	u32 i;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
		 d.alg_id);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	/* cumulative digest over TPM registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, &d);
		/* now accumulate with current aggregate */
		rc = crypto_shash_update(shash, d.digest,
					 crypto_shash_digestsize(tfm));
		if (rc != 0)
			return rc;
	}
	/*
	 * Extend cumulative digest over TPM registers 8-9, which contain
	 * measurement for the kernel command line (reg. 8) and image (reg. 9)
	 * in a typical PCR allocation. Registers 8-9 are only included in
	 * non-SHA1 boot_aggregate digests to avoid ambiguity.
	 */
	if (alg_id != TPM_ALG_SHA1) {
		for (i = TPM_PCR8; i < TPM_PCR10; i++) {
			ima_pcrread(i, &d);
			rc = crypto_shash_update(shash, d.digest,
						 crypto_shash_digestsize(tfm));
			if (rc != 0)
				return rc;
		}
	}
	if (!rc)
		rc = crypto_shash_final(shash, digest);
	return rc;
}

int ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	u16 crypto_id, alg_id;
	int rc, i, bank_idx = -1;

	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
		crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
		if (crypto_id == hash->algo) {
			bank_idx = i;
			break;
		}

		if (crypto_id == HASH_ALGO_SHA256)
			bank_idx = i;

		if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1)
			bank_idx = i;
	}

	if (bank_idx == -1) {
		pr_err("No suitable TPM algorithm for boot aggregate\n");
		return 0;
	}

	hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	hash->length = crypto_shash_digestsize(tfm);
	alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id;
	rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm);

	ima_free_tfm(tfm);

	return rc;
}
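
/*
 * Illustrative example, not part of the original file: the bank selection
 * in ima_calc_boot_aggregate() prefers, in order, a bank matching the
 * requested hash->algo, then SHA-256, then SHA1. On a hypothetical chip
 * with allocated banks { SHA1, SHA-256 } and hash->algo == HASH_ALGO_SHA512:
 *
 *	i = 0: SHA1 matches neither preferred test -> bank_idx = 0 (fallback)
 *	i = 1: SHA-256 -> bank_idx = 1 (overrides the SHA1 fallback)
 *
 * The boot aggregate is then computed over the SHA-256 bank, and
 * hash->algo is rewritten to HASH_ALGO_SHA256 so a verifier knows which
 * bank was used.
 */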