// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C)2006 USAGI/WIDE Project
 *
 * Author:
 *	Kazunori Miyazawa <miyazawa@linux-ipv6.org>
 */

#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* constant blocks used to derive K1, K2 and K3 from the user key (RFC 3566) */
static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
			   0x02020202, 0x02020202, 0x02020202, 0x02020202,
			   0x03030303, 0x03030303, 0x03030303, 0x03030303};

/*
 * +------------------------
 * | <parent tfm>
 * +------------------------
 * | xcbc_tfm_ctx
 * +------------------------
 * | consts (block size * 2)
 * +------------------------
 */
struct xcbc_tfm_ctx {
	struct crypto_cipher *child;
	u8 consts[];
};

/*
 * +------------------------
 * | <shash desc>
 * +------------------------
 * | xcbc_desc_ctx
 * +------------------------
 * | odds (block size)
 * +------------------------
 * | prev (block size)
 * +------------------------
 */
struct xcbc_desc_ctx {
	unsigned int len;
	u8 odds[];
};

#define XCBC_BLOCKSIZE	16

static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
				     const u8 *inkey, unsigned int keylen)
{
	struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent);
	u8 *consts = ctx->consts;
	int err = 0;
	u8 key1[XCBC_BLOCKSIZE];
	int bs = sizeof(key1);

	err = crypto_cipher_setkey(ctx->child, inkey, keylen);
	if (err)
		return err;

	/* K2 = E(K, 0x02^16) and K3 = E(K, 0x03^16), stored back to back in consts */
	crypto_cipher_encrypt_one(ctx->child, consts, (u8 *)ks + bs);
	crypto_cipher_encrypt_one(ctx->child, consts + bs, (u8 *)ks + bs * 2);
	/* K1 = E(K, 0x01^16) becomes the key used for the CBC-MAC itself */
	crypto_cipher_encrypt_one(ctx->child, key1, (u8 *)ks);

	return crypto_cipher_setkey(ctx->child, key1, bs);
}

static int crypto_xcbc_digest_init(struct shash_desc *pdesc)
{
	struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
	int bs = crypto_shash_blocksize(pdesc->tfm);
	u8 *prev = &ctx->odds[bs];

	ctx->len = 0;
	memset(prev, 0, bs);

	return 0;
}

static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p,
				     unsigned int len)
{
	struct crypto_shash *parent = pdesc->tfm;
	struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
	struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
	struct crypto_cipher *tfm = tctx->child;
	int bs = crypto_shash_blocksize(parent);
	u8 *odds = ctx->odds;
	u8 *prev = odds + bs;

	/* check whether the new data still fits in the partial block */
	if ((ctx->len + len) <= bs) {
		memcpy(odds + ctx->len, p, len);
		ctx->len += len;
		return 0;
	}

	/* fill odds with new data and encrypt it */
	memcpy(odds + ctx->len, p, bs - ctx->len);
	len -= bs - ctx->len;
	p += bs - ctx->len;

	crypto_xor(prev, odds, bs);
	crypto_cipher_encrypt_one(tfm, prev, prev);

	/* reset the partial-block length */
	ctx->len = 0;

	/* encrypt the remaining full blocks */
	while (len > bs) {
		crypto_xor(prev, p, bs);
		crypto_cipher_encrypt_one(tfm, prev, prev);
		p += bs;
		len -= bs;
	}

	/* keep whatever is left over (at most a block) for the next call */
	if (len) {
		memcpy(odds, p, len);
		ctx->len = len;
	}

	return 0;
}

static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out)
{
	struct crypto_shash *parent = pdesc->tfm;
	struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
	struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
	struct crypto_cipher *tfm = tctx->child;
	int bs = crypto_shash_blocksize(parent);
	u8 *odds = ctx->odds;
	u8 *prev = odds + bs;
	unsigned int offset = 0;

	if (ctx->len != bs) {
		/* incomplete final block: append 0x80, zero-pad, select K3 */
		unsigned int rlen;
		u8 *p = odds + ctx->len;

		*p = 0x80;
		p++;

		rlen = bs - ctx->len - 1;
		if (rlen)
			memset(p, 0, rlen);

		offset += bs;
	}

	crypto_xor(prev, odds, bs);
	crypto_xor(prev, &tctx->consts[offset], bs);

	crypto_cipher_encrypt_one(tfm, out, prev);

	return 0;
}

static int xcbc_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_cipher *cipher;
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
	struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	return 0;
}

static void xcbc_exit_tfm(struct crypto_tfm *tfm)
{
	struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(ctx->child);
}

static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct shash_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *alg;
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	spawn = shash_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_cipher_alg(spawn);

	err = -EINVAL;
	if (alg->cra_blocksize != XCBC_BLOCKSIZE)
		goto err_free_inst;

	err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->cra_priority;
	inst->alg.base.cra_blocksize = alg->cra_blocksize;
	inst->alg.base.cra_ctxsize = sizeof(struct xcbc_tfm_ctx) +
				     alg->cra_blocksize * 2;

	inst->alg.digestsize = alg->cra_blocksize;
	inst->alg.descsize = sizeof(struct xcbc_desc_ctx) +
			     alg->cra_blocksize * 2;

	inst->alg.base.cra_init = xcbc_init_tfm;
	inst->alg.base.cra_exit = xcbc_exit_tfm;

	inst->alg.init = crypto_xcbc_digest_init;
	inst->alg.update = crypto_xcbc_digest_update;
	inst->alg.final = crypto_xcbc_digest_final;
	inst->alg.setkey = crypto_xcbc_digest_setkey;

	inst->free = shash_free_singlespawn_instance;

	err = shash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		shash_free_singlespawn_instance(inst);
	}
	return err;
}

static struct crypto_template crypto_xcbc_tmpl = {
	.name = "xcbc",
	.create = xcbc_create,
	.module = THIS_MODULE,
};

static int __init crypto_xcbc_module_init(void)
{
	return crypto_register_template(&crypto_xcbc_tmpl);
}

static void __exit crypto_xcbc_module_exit(void)
{
	crypto_unregister_template(&crypto_xcbc_tmpl);
}

subsys_initcall(crypto_xcbc_module_init);
module_exit(crypto_xcbc_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XCBC keyed hash algorithm");
MODULE_ALIAS_CRYPTO("xcbc");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
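/*
 * Hypothetical usage sketch (not part of this file): how a separate kernel
 * module might compute an XCBC-AES MAC through the shash API that the
 * template above provides.  The function name, buffers and key below are
 * made up for illustration; the calls themselves (crypto_alloc_shash(),
 * crypto_shash_setkey(), crypto_shash_tfm_digest(), crypto_free_shash())
 * are the standard shash interface declared in <crypto/hash.h>.
 */
#if 0	/* example only, never compiled as part of xcbc.c */
#include <crypto/hash.h>
#include <linux/err.h>

static int xcbc_aes_mac_example(const u8 *key, unsigned int keylen,
				const u8 *data, unsigned int len, u8 *mac)
{
	struct crypto_shash *tfm;
	int err;

	/* instantiates "xcbc(aes)" via the template registered above */
	tfm = crypto_alloc_shash("xcbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, keylen);
	if (!err)
		/* one-shot digest; mac must hold crypto_shash_digestsize(tfm)
		 * bytes, i.e. 16 for AES */
		err = crypto_shash_tfm_digest(tfm, data, len, mac);

	crypto_free_shash(tfm);
	return err;
}
#endif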