// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Cryptographic API.
 *
 * Single-block cipher operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "internal.h"

/*
 * Bounce a misaligned key through a temporary buffer that satisfies the
 * algorithm's alignment mask before handing it to ->cia_setkey().
 */
static int setkey_unaligned(struct crypto_cipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct cipher_alg *cia = crypto_cipher_alg(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cia->cia_setkey(crypto_cipher_tfm(tfm), alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

int crypto_cipher_setkey(struct crypto_cipher *tfm,
			 const u8 *key, unsigned int keylen)
{
	struct cipher_alg *cia = crypto_cipher_alg(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);

	if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cia->cia_setkey(crypto_cipher_tfm(tfm), key, keylen);
}
EXPORT_SYMBOL_NS_GPL(crypto_cipher_setkey, CRYPTO_INTERNAL);

/*
 * Encrypt or decrypt a single block, bouncing through an aligned stack
 * buffer when either the source or the destination is misaligned.
 */
static inline void cipher_crypt_one(struct crypto_cipher *tfm,
				    u8 *dst, const u8 *src, bool enc)
{
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	struct cipher_alg *cia = crypto_cipher_alg(tfm);
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		enc ? cia->cia_encrypt : cia->cia_decrypt;

	if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
		unsigned int bs = crypto_cipher_blocksize(tfm);
		u8 buffer[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
		u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);

		memcpy(tmp, src, bs);
		fn(crypto_cipher_tfm(tfm), tmp, tmp);
		memcpy(dst, tmp, bs);
	} else {
		fn(crypto_cipher_tfm(tfm), dst, src);
	}
}

void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
			       u8 *dst, const u8 *src)
{
	cipher_crypt_one(tfm, dst, src, true);
}
EXPORT_SYMBOL_NS_GPL(crypto_cipher_encrypt_one, CRYPTO_INTERNAL);

void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
			       u8 *dst, const u8 *src)
{
	cipher_crypt_one(tfm, dst, src, false);
}
EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, CRYPTO_INTERNAL);

/*
 * Clone a cipher handle without sleeping.  Only algorithms without a
 * cra_init hook can be cloned, since per-instance initialisation cannot
 * be rerun here.
 */
struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher)
{
	struct crypto_tfm *tfm = crypto_cipher_tfm(cipher);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct crypto_cipher *ncipher;
	struct crypto_tfm *ntfm;

	if (alg->cra_init)
		return ERR_PTR(-ENOSYS);

	if (unlikely(!crypto_mod_get(alg)))
		return ERR_PTR(-ESTALE);

	ntfm = __crypto_alloc_tfmgfp(alg, CRYPTO_ALG_TYPE_CIPHER,
				     CRYPTO_ALG_TYPE_MASK, GFP_ATOMIC);
	if (IS_ERR(ntfm)) {
		crypto_mod_put(alg);
		return ERR_CAST(ntfm);
	}

	ntfm->crt_flags = tfm->crt_flags;

	ncipher = __crypto_cipher_cast(ntfm);

	return ncipher;
}
EXPORT_SYMBOL_GPL(crypto_clone_cipher);
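A minimal usage sketch, not part of cipher.c: how a kernel module might drive the single-block cipher interface above. The "aes" algorithm name, the 16-byte buffers and the example_one_block() helper are assumptions for illustration only; because the symbols are exported in the CRYPTO_INTERNAL namespace, a real module would also need MODULE_IMPORT_NS(CRYPTO_INTERNAL).

#include <crypto/internal/cipher.h>
#include <linux/err.h>

/* Illustrative helper (hypothetical): encrypt one 16-byte block with AES. */
static int example_one_block(const u8 *key, unsigned int keylen,
			     const u8 *src, u8 *dst)
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, keylen);
	if (!err) {
		crypto_cipher_encrypt_one(tfm, dst, src);	/* dst = E_k(src) */
		crypto_cipher_decrypt_one(tfm, dst, dst);	/* in-place round trip */
	}

	crypto_free_cipher(tfm);
	return err;
}

crypto_cipher_encrypt_one() and crypto_cipher_decrypt_one() operate on exactly one block of crypto_cipher_blocksize() bytes; chaining modes, IV handling and scatterlist walking are the job of the skcipher API, not of this file.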