/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * sha256_base.h - core logic for SHA-256 implementations
 *
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#ifndef _CRYPTO_SHA256_BASE_H
#define _CRYPTO_SHA256_BASE_H

#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
#include <linux/string.h>
#include <linux/types.h>

/*
 * Prototype of the architecture-specific block transform: consumes
 * 'blocks' full SHA256_BLOCK_SIZE-byte blocks from 'src'.
 */
typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
			       int blocks);

static inline int sha224_base_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	sha224_init(sctx);
	return 0;
}

static inline int sha256_base_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	sha256_init(sctx);
	return 0;
}

static inline int lib_sha256_base_do_update(struct sha256_state *sctx,
					    const u8 *data,
					    unsigned int len,
					    sha256_block_fn *block_fn)
{
	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;

	sctx->count += len;

	if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) {
		int blocks;

		if (partial) {
			/* Fill up and consume the buffered partial block. */
			int p = SHA256_BLOCK_SIZE - partial;

			memcpy(sctx->buf + partial, data, p);
			data += p;
			len -= p;

			block_fn(sctx, sctx->buf, 1);
		}

		blocks = len / SHA256_BLOCK_SIZE;
		len %= SHA256_BLOCK_SIZE;

		if (blocks) {
			block_fn(sctx, data, blocks);
			data += blocks * SHA256_BLOCK_SIZE;
		}
		partial = 0;
	}
	/* Stash any remaining partial block for the next call. */
	if (len)
		memcpy(sctx->buf + partial, data, len);

	return 0;
}

static inline int sha256_base_do_update(struct shash_desc *desc,
					const u8 *data,
					unsigned int len,
					sha256_block_fn *block_fn)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	return lib_sha256_base_do_update(sctx, data, len, block_fn);
}

static inline int lib_sha256_base_do_finalize(struct sha256_state *sctx,
					      sha256_block_fn *block_fn)
{
	const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
	__be64 *bits = (__be64 *)(sctx->buf + bit_offset);
	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;

	/* Append the mandatory 0x80 padding byte. */
	sctx->buf[partial++] = 0x80;
	if (partial > bit_offset) {
		/* No room left for the length field: pad out and process. */
		memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial);
		partial = 0;

		block_fn(sctx, sctx->buf, 1);
	}

	/* Zero-pad and append the message length in bits, big endian. */
	memset(sctx->buf + partial, 0x0, bit_offset - partial);
	*bits = cpu_to_be64(sctx->count << 3);
	block_fn(sctx, sctx->buf, 1);

	return 0;
}

static inline int sha256_base_do_finalize(struct shash_desc *desc,
					  sha256_block_fn *block_fn)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	return lib_sha256_base_do_finalize(sctx, block_fn);
}

static inline int lib_sha256_base_finish(struct sha256_state *sctx, u8 *out,
					 unsigned int digest_size)
{
	__be32 *digest = (__be32 *)out;
	int i;

	/* Emit the digest as big-endian words, then wipe the state. */
	for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32))
		put_unaligned_be32(sctx->state[i], digest++);

	memzero_explicit(sctx, sizeof(*sctx));
	return 0;
}

static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
{
	unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
	struct sha256_state *sctx = shash_desc_ctx(desc);

	return lib_sha256_base_finish(sctx, out, digest_size);
}

#endif /* _CRYPTO_SHA256_BASE_H */
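
/*
 * Illustrative sketch only, not part of the header above: one way an
 * architecture-specific glue driver might plug its block transform into
 * these helpers.  The transform sha256_arch_transform, the driver name
 * "sha256-arch" and the priority value are hypothetical placeholders;
 * a real driver supplies its own (typically assembly or SIMD) transform.
 */

#include <linux/module.h>

/* Hypothetical block transform, provided elsewhere by the architecture. */
void sha256_arch_transform(struct sha256_state *sst, u8 const *src,
			   int blocks);

static int sha256_arch_update(struct shash_desc *desc, const u8 *data,
			      unsigned int len)
{
	/* Buffer partial input and feed whole blocks to the transform. */
	return sha256_base_do_update(desc, data, len, sha256_arch_transform);
}

static int sha256_arch_finup(struct shash_desc *desc, const u8 *data,
			     unsigned int len, u8 *out)
{
	/* Hash the trailing data, apply padding, then emit the digest. */
	sha256_base_do_update(desc, data, len, sha256_arch_transform);
	sha256_base_do_finalize(desc, sha256_arch_transform);
	return sha256_base_finish(desc, out);
}

static int sha256_arch_final(struct shash_desc *desc, u8 *out)
{
	return sha256_arch_finup(desc, NULL, 0, out);
}

static struct shash_alg sha256_arch_alg = {
	.digestsize	= SHA256_DIGEST_SIZE,
	.init		= sha256_base_init,
	.update		= sha256_arch_update,
	.final		= sha256_arch_final,
	.finup		= sha256_arch_finup,
	.descsize	= sizeof(struct sha256_state),
	.base		= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sha256-arch",	/* hypothetical */
		.cra_priority		= 300,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	},
};

static int __init sha256_arch_mod_init(void)
{
	return crypto_register_shash(&sha256_arch_alg);
}

static void __exit sha256_arch_mod_exit(void)
{
	crypto_unregister_shash(&sha256_arch_alg);
}

module_init(sha256_arch_mod_init);
module_exit(sha256_arch_mod_exit);

MODULE_LICENSE("GPL");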