/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/*
 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
 * arbitrary modules to be loaded. Loading from userspace may still need the
 * unprefixed names, so those aliases are retained as well.
 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
 * expands twice on the same line. Instead, use a separate base name for the
 * alias.
 */
#define MODULE_ALIAS_CRYPTO(name)	\
		__MODULE_INFO(alias, alias_userspace, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)
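
/*
 * Example: a module implementing an algorithm named "toy" would declare the
 * alias so that both the prefixed "crypto-toy" form used by the crypto
 * core's module autoloading and the bare "toy" form used from userspace
 * resolve to it.  The "toy" name is purely illustrative:
 *
 *	MODULE_ALIAS_CRYPTO("toy");
 */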

/*
 * Algorithm masks and types.
 */
#define CRYPTO_ALG_TYPE_MASK		0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER		0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
#define CRYPTO_ALG_TYPE_AEAD		0x00000003
#define CRYPTO_ALG_TYPE_BLKCIPHER	0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER	0x00000006
#define CRYPTO_ALG_TYPE_KPP		0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS	0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS	0x0000000b
#define CRYPTO_ALG_TYPE_RNG		0x0000000c
#define CRYPTO_ALG_TYPE_AKCIPHER	0x0000000d
#define CRYPTO_ALG_TYPE_DIGEST		0x0000000e
#define CRYPTO_ALG_TYPE_HASH		0x0000000e
#define CRYPTO_ALG_TYPE_SHASH		0x0000000e
#define CRYPTO_ALG_TYPE_AHASH		0x0000000f

#define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK	0x0000000c
#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK	0x0000000e

#define CRYPTO_ALG_LARVAL		0x00000010
#define CRYPTO_ALG_DEAD			0x00000020
#define CRYPTO_ALG_DYING		0x00000040
#define CRYPTO_ALG_ASYNC		0x00000080

/*
 * Set this bit if and only if the algorithm requires another algorithm of
 * the same type to handle corner cases.
 */
#define CRYPTO_ALG_NEED_FALLBACK	0x00000100

/*
 * This bit is set for symmetric key ciphers that have already been wrapped
 * with a generic IV generator to prevent them from being wrapped again.
 */
#define CRYPTO_ALG_GENIV		0x00000200

/*
 * Set if the algorithm has passed automated run-time testing.  Note that
 * if there is no run-time testing for a given algorithm it is considered
 * to have passed.
 */
#define CRYPTO_ALG_TESTED		0x00000400

/*
 * Set if the algorithm is an instance that is built from templates.
 */
#define CRYPTO_ALG_INSTANCE		0x00000800

/* Set this bit if the algorithm provided is hardware accelerated but
 * not available to userspace via instruction set or so.
 */
#define CRYPTO_ALG_KERN_DRIVER_ONLY	0x00001000

/*
 * Mark a cipher as a service implementation only usable by another
 * cipher and never by a normal user of the kernel crypto API
 */
#define CRYPTO_ALG_INTERNAL		0x00002000
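
/*
 * The above bits are matched through the type/mask pair taken by the
 * allocation and query interfaces below: "mask" selects which bits are
 * compared and "type" gives the values they must have.  As an illustrative
 * sketch (not a prescribed recipe), a caller that cannot tolerate
 * asynchronous implementations may leave CRYPTO_ALG_ASYNC cleared in type
 * while setting it in mask:
 *
 *	struct crypto_blkcipher *tfm;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */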

/*
 * Transform masks and values (for crt_flags).
 */
#define CRYPTO_TFM_REQ_MASK		0x000fff00
#define CRYPTO_TFM_RES_MASK		0xfff00000

#define CRYPTO_TFM_REQ_WEAK_KEY		0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG	0x00000400
#define CRYPTO_TFM_RES_WEAK_KEY		0x00100000
#define CRYPTO_TFM_RES_BAD_KEY_LEN	0x00200000
#define CRYPTO_TFM_RES_BAD_KEY_SCHED	0x00400000
#define CRYPTO_TFM_RES_BAD_BLOCK_LEN	0x00800000
#define CRYPTO_TFM_RES_BAD_FLAGS	0x01000000

/*
 * Miscellaneous stuff.
 */
#define CRYPTO_MAX_ALG_NAME		128

/*
 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
 * declaration) is used to ensure that the crypto_tfm context structure is
 * aligned correctly for the given architecture so that there are no alignment
 * faults for C data types.  In particular, this is required on platforms such
 * as arm where pointers are 32-bit aligned but there are data types such as
 * u64 which require 64-bit alignment.
 */
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN

#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
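
/*
 * The CRYPTO_TFM_REQ_* bits are set by API users to steer an operation,
 * while the CRYPTO_TFM_RES_* bits are set by the implementation to report
 * why a call failed.  A hedged sketch of the common setkey error-handling
 * pattern (variable names are illustrative):
 *
 *	err = crypto_blkcipher_setkey(tfm, key, keylen);
 *	if (err) {
 *		if (crypto_blkcipher_get_flags(tfm) & CRYPTO_TFM_RES_BAD_KEY_LEN)
 *			pr_debug("unsupported key length %u\n", keylen);
 *		return err;
 *	}
 */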

struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
struct crypto_blkcipher;
struct crypto_tfm;
struct crypto_type;
struct skcipher_givcrypt_request;

typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);

/**
 * DOC: Block Cipher Context Data Structures
 *
 * These data structures define the operating context for each block cipher
 * type.
 */

struct crypto_async_request {
	struct list_head list;
	crypto_completion_t complete;
	void *data;
	struct crypto_tfm *tfm;

	u32 flags;
};

struct ablkcipher_request {
	struct crypto_async_request base;

	unsigned int nbytes;

	void *info;

	struct scatterlist *src;
	struct scatterlist *dst;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct blkcipher_desc {
	struct crypto_blkcipher *tfm;
	void *info;
	u32 flags;
};

struct cipher_desc {
	struct crypto_tfm *tfm;
	void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
			     const u8 *src, unsigned int nbytes);
	void *info;
};
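
/*
 * A blkcipher_desc is normally built on the caller's stack immediately
 * before a synchronous cipher call; only @tfm and @flags need to be set by
 * the caller (see the crypto_blkcipher_* functions below).  Sketch, assuming
 * "tfm" was allocated earlier:
 *
 *	struct blkcipher_desc desc = {
 *		.tfm   = tfm,
 *		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
 *	};
 */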

/**
 * DOC: Block Cipher Algorithm Definitions
 *
 * These data structures define modular crypto algorithm implementations,
 * managed via crypto_register_alg() and crypto_unregister_alg().
 */

/**
 * struct ablkcipher_alg - asynchronous block cipher definition
 * @min_keysize: Minimum key size supported by the transformation. This is the
 *		 smallest key length supported by this transformation algorithm.
 *		 This must be set to one of the pre-defined values as this is
 *		 not hardware specific. Possible values for this field can be
 *		 found via git grep "_MIN_KEY_SIZE" include/crypto/
 * @max_keysize: Maximum key size supported by the transformation. This is the
 *		 largest key length supported by this transformation algorithm.
 *		 This must be set to one of the pre-defined values as this is
 *		 not hardware specific. Possible values for this field can be
 *		 found via git grep "_MAX_KEY_SIZE" include/crypto/
 * @setkey: Set key for the transformation. This function is used to either
 *	    program a supplied key into the hardware or store the key in the
 *	    transformation context for programming it later. Note that this
 *	    function does modify the transformation context. This function can
 *	    be called multiple times during the existence of the transformation
 *	    object, so one must make sure the key is properly reprogrammed into
 *	    the hardware. This function is also responsible for checking the key
 *	    length for validity. In case a software fallback was put in place in
 *	    the @cra_init call, this function might need to use the fallback if
 *	    the algorithm doesn't support all of the key sizes.
 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
 *	     the supplied scatterlist containing the blocks of data. The crypto
 *	     API consumer is responsible for aligning the entries of the
 *	     scatterlist properly and making sure the chunks are correctly
 *	     sized. In case a software fallback was put in place in the
 *	     @cra_init call, this function might need to use the fallback if
 *	     the algorithm doesn't support all of the key sizes. In case the
 *	     key was stored in transformation context, the key might need to be
 *	     re-programmed into the hardware in this function. This function
 *	     shall not modify the transformation context, as this function may
 *	     be called in parallel with the same transformation object.
 * @decrypt: Decrypt a scatterlist of blocks. This is a reverse counterpart to
 *	     @encrypt and the conditions are exactly the same.
 * @givencrypt: Update the IV for encryption. With this function, a cipher
 *		implementation may provide the function on how to update the IV
 *		for encryption.
 * @givdecrypt: Update the IV for decryption. This is the reverse of
 *		@givencrypt.
 * @geniv: The transformation implementation may use an "IV generator" provided
 *	   by the kernel crypto API. Several use cases have a predefined
 *	   approach how IVs are to be updated. For such use cases, the kernel
 *	   crypto API provides ready-to-use implementations that can be
 *	   referenced with this variable.
 * @ivsize: IV size applicable for transformation. The consumer must provide an
 *	    IV of exactly that size to perform the encrypt or decrypt operation.
 *
 * All fields except @givencrypt, @givdecrypt, @geniv and @ivsize are
 * mandatory and must be filled.
 */
struct ablkcipher_alg {
	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct ablkcipher_request *req);
	int (*decrypt)(struct ablkcipher_request *req);
	int (*givencrypt)(struct skcipher_givcrypt_request *req);
	int (*givdecrypt)(struct skcipher_givcrypt_request *req);

	const char *geniv;

	unsigned int min_keysize;
	unsigned int max_keysize;
	unsigned int ivsize;
};
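
/*
 * An illustrative skeleton of the three mandatory callbacks; the "myalg_"
 * names and the 16-byte key check are hypothetical, not from an in-tree
 * driver.  @encrypt and @decrypt either complete synchronously (returning 0)
 * or queue the request and return -EINPROGRESS, reporting the final status
 * later through req->base.complete:
 *
 *	static int myalg_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 *				unsigned int keylen)
 *	{
 *		if (keylen != 16) {
 *			crypto_ablkcipher_set_flags(tfm,
 *						    CRYPTO_TFM_RES_BAD_KEY_LEN);
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 *
 *	static int myalg_encrypt(struct ablkcipher_request *req)
 *	{
 *		(hand req to the hardware queue here)
 *		return -EINPROGRESS;
 *	}
 */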

/**
 * struct blkcipher_alg - synchronous block cipher definition
 * @min_keysize: see struct ablkcipher_alg
 * @max_keysize: see struct ablkcipher_alg
 * @setkey: see struct ablkcipher_alg
 * @encrypt: see struct ablkcipher_alg
 * @decrypt: see struct ablkcipher_alg
 * @geniv: see struct ablkcipher_alg
 * @ivsize: see struct ablkcipher_alg
 *
 * All fields except @geniv and @ivsize are mandatory and must be filled.
 */
struct blkcipher_alg {
	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes);
	int (*decrypt)(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes);

	const char *geniv;

	unsigned int min_keysize;
	unsigned int max_keysize;
	unsigned int ivsize;
};

/**
 * struct cipher_alg - single-block symmetric ciphers definition
 * @cia_min_keysize: Minimum key size supported by the transformation. This is
 *		     the smallest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined
 *		     values as this is not hardware specific. Possible values
 *		     for this field can be found via git grep "_MIN_KEY_SIZE"
 *		     include/crypto/
 * @cia_max_keysize: Maximum key size supported by the transformation. This is
 *		     the largest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined values
 *		     as this is not hardware specific. Possible values for this
 *		     field can be found via git grep "_MAX_KEY_SIZE"
 *		     include/crypto/
 * @cia_setkey: Set key for the transformation. This function is used to either
 *		program a supplied key into the hardware or store the key in the
 *		transformation context for programming it later. Note that this
 *		function does modify the transformation context. This function
 *		can be called multiple times during the existence of the
 *		transformation object, so one must make sure the key is properly
 *		reprogrammed into the hardware. This function is also
 *		responsible for checking the key length for validity.
 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
 *		 single block of data, which must be @cra_blocksize big. This
 *		 always operates on a full @cra_blocksize and it is not possible
 *		 to encrypt a block of smaller size. The supplied buffers must
 *		 therefore also be at least of @cra_blocksize size. Both the
 *		 input and output buffers are always aligned to @cra_alignmask.
 *		 In case either of the input or output buffer supplied by user
 *		 of the crypto API is not aligned to @cra_alignmask, the crypto
 *		 API will re-align the buffers. The re-alignment means that a
 *		 new buffer will be allocated, the data will be copied into the
 *		 new buffer, then the processing will happen on the new buffer,
 *		 then the data will be copied back into the original buffer and
 *		 finally the new buffer will be freed. In case a software
 *		 fallback was put in place in the @cra_init call, this function
 *		 might need to use the fallback if the algorithm doesn't support
 *		 all of the key sizes. In case the key was stored in
 *		 transformation context, the key might need to be re-programmed
 *		 into the hardware in this function. This function shall not
 *		 modify the transformation context, as this function may be
 *		 called in parallel with the same transformation object.
 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
 *		 @cia_encrypt, and the conditions are exactly the same.
 *
 * All fields are mandatory and must be filled.
 */
struct cipher_alg {
	unsigned int cia_min_keysize;
	unsigned int cia_max_keysize;
	int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen);
	void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct compress_alg {
	int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
			    unsigned int slen, u8 *dst, unsigned int *dlen);
	int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
			      unsigned int slen, u8 *dst, unsigned int *dlen);
};
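
/*
 * Sketch of a cia_encrypt implementation.  The "toy_" names and the XOR
 * transform are illustrative stand-ins only; a real single-block cipher
 * performs an actual block transform on exactly cra_blocksize bytes:
 *
 *	static void toy_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 *	{
 *		struct toy_ctx *ctx = crypto_tfm_ctx(tfm);
 *		int i;
 *
 *		for (i = 0; i < 16; i++)
 *			dst[i] = src[i] ^ ctx->key[i];
 *	}
 *
 * Such a function is hooked up through cra_u.cipher.cia_encrypt (see the
 * cra_cipher shorthand defined below).
 */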

#define cra_ablkcipher	cra_u.ablkcipher
#define cra_blkcipher	cra_u.blkcipher
#define cra_cipher	cra_u.cipher
#define cra_compress	cra_u.compress

/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
 *	       CRYPTO_ALG_* flags for the flags which go in here. Those are
 *	       used for fine-tuning the description of the transformation
 *	       algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *		   of the smallest possible unit which can be transformed with
 *		   this algorithm. The users must respect this value.
 *		   In case of HASH transformation, it is possible for a smaller
 *		   block than @cra_blocksize to be passed to the crypto API for
 *		   transformation, in case of any other transformation type, an
 *		   error will be returned upon any attempt to transform smaller
 *		   than @cra_blocksize chunks.
 * @cra_ctxsize: Size of the operational context of the transformation. This
 *		 value informs the kernel crypto API about the memory size
 *		 needed to be allocated for the transformation context.
 * @cra_alignmask: Alignment mask for the input and output data buffer. The data
 *		   buffer containing the input data for the algorithm must be
 *		   aligned to this alignment mask. The data buffer for the
 *		   output data must be aligned to this alignment mask. Note that
 *		   the Crypto API will do the re-alignment in software, but
 *		   only under special conditions and there is a performance hit.
 *		   The re-alignment happens at these occasions for different
 *		   @cra_u types: cipher -- For both input data and output data
 *		   buffer; ahash -- For output hash destination buf; shash --
 *		   For output hash destination buf.
 *		   This is needed on hardware which is flawed by design and
 *		   cannot pick data from arbitrary addresses.
 * @cra_priority: Priority of this transformation implementation. In case
 *		  multiple transformations with same @cra_name are available to
 *		  the Crypto API, the kernel will use the one with highest
 *		  @cra_priority.
 * @cra_name: Generic name (usable by multiple implementations) of the
 *	      transformation algorithm. This is the name of the transformation
 *	      itself. This field is used by the kernel when looking up the
 *	      providers of particular transformation.
 * @cra_driver_name: Unique name of the transformation provider. This is the
 *		     name of the provider of the transformation. This can be any
 *		     arbitrary value, but in the usual case, this contains the
 *		     name of the chip or provider and the name of the
 *		     transformation algorithm.
 * @cra_type: Type of the cryptographic transformation. This is a pointer to
 *	      struct crypto_type, which implements callbacks common for all
 *	      transformation types. There are multiple options:
 *	      &crypto_blkcipher_type, &crypto_ablkcipher_type,
 *	      &crypto_ahash_type, &crypto_rng_type.
 *	      This field might be empty. In that case, there are no common
 *	      callbacks. This is the case for: cipher, compress, shash.
 * @cra_u: Callbacks implementing the transformation. This is a union of
 *	   multiple structures. Depending on the type of transformation selected
 *	   by @cra_type and @cra_flags above, the associated structure must be
 *	   filled with callbacks. This field might be empty. This is the case
 *	   for ahash, shash.
 * @cra_init: Initialize the cryptographic transformation object. This function
 *	      is used to initialize the cryptographic transformation object.
 *	      This function is called only once at the instantiation time, right
 *	      after the transformation context was allocated. In case the
 *	      cryptographic hardware has some special requirements which need to
 *	      be handled by software, this function shall check for the precise
 *	      requirement of the transformation and put any software fallbacks
 *	      in place.
 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
 *	      counterpart to @cra_init, used to remove various changes set in
 *	      @cra_init.
 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
 * @cra_list: internally used
 * @cra_users: internally used
 * @cra_refcnt: internally used
 * @cra_destroy: internally used
 *
 * The struct crypto_alg describes a generic Crypto API algorithm and is common
 * for all of the transformations. Any variable not documented here shall not
 * be used by a cipher implementation as it is internal to the Crypto API.
 */
struct crypto_alg {
	struct list_head cra_list;
	struct list_head cra_users;

	u32 cra_flags;
	unsigned int cra_blocksize;
	unsigned int cra_ctxsize;
	unsigned int cra_alignmask;

	int cra_priority;
	atomic_t cra_refcnt;

	char cra_name[CRYPTO_MAX_ALG_NAME];
	char cra_driver_name[CRYPTO_MAX_ALG_NAME];

	const struct crypto_type *cra_type;

	union {
		struct ablkcipher_alg ablkcipher;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
	} cra_u;

	int (*cra_init)(struct crypto_tfm *tfm);
	void (*cra_exit)(struct crypto_tfm *tfm);
	void (*cra_destroy)(struct crypto_alg *alg);

	struct module *cra_module;
} CRYPTO_MINALIGN_ATTR;

/*
 * Algorithm registration interface.
 */
int crypto_register_alg(struct crypto_alg *alg);
int crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
int crypto_unregister_algs(struct crypto_alg *algs, int count);

/*
 * Algorithm query interface.
 */
int crypto_has_alg(const char *name, u32 type, u32 mask);
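
/*
 * A hedged sketch of registering the single-block cipher from the earlier
 * example during module init; all "toy" names are illustrative:
 *
 *	static struct crypto_alg toy_alg = {
 *		.cra_name		= "toy",
 *		.cra_driver_name	= "toy-generic",
 *		.cra_priority		= 100,
 *		.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 *		.cra_blocksize		= 16,
 *		.cra_ctxsize		= sizeof(struct toy_ctx),
 *		.cra_module		= THIS_MODULE,
 *		.cra_u.cipher		= {
 *			.cia_min_keysize = 16,
 *			.cia_max_keysize = 16,
 *			.cia_setkey	 = toy_setkey,
 *			.cia_encrypt	 = toy_encrypt,
 *			.cia_decrypt	 = toy_decrypt,
 *		},
 *	};
 *
 *	static int __init toy_init(void)
 *	{
 *		return crypto_register_alg(&toy_alg);
 *	}
 *
 * Afterwards, crypto_has_alg("toy", CRYPTO_ALG_TYPE_CIPHER,
 * CRYPTO_ALG_TYPE_MASK) reports the algorithm as available.
 */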

/*
 * Transforms: user-instantiated objects which encapsulate algorithms
 * and core processing logic.  Managed via crypto_alloc_*() and
 * crypto_free_*(), as well as the various helpers below.
 */

struct ablkcipher_tfm {
	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct ablkcipher_request *req);
	int (*decrypt)(struct ablkcipher_request *req);

	struct crypto_ablkcipher *base;

	unsigned int ivsize;
	unsigned int reqsize;
};

struct blkcipher_tfm {
	void *iv;
	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes);
	int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes);
};

struct cipher_tfm {
	int (*cit_setkey)(struct crypto_tfm *tfm,
			  const u8 *key, unsigned int keylen);
	void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct compress_tfm {
	int (*cot_compress)(struct crypto_tfm *tfm,
			    const u8 *src, unsigned int slen,
			    u8 *dst, unsigned int *dlen);
	int (*cot_decompress)(struct crypto_tfm *tfm,
			      const u8 *src, unsigned int slen,
			      u8 *dst, unsigned int *dlen);
};

#define crt_ablkcipher	crt_u.ablkcipher
#define crt_blkcipher	crt_u.blkcipher
#define crt_cipher	crt_u.cipher
#define crt_compress	crt_u.compress

struct crypto_tfm {

	u32 crt_flags;

	union {
		struct ablkcipher_tfm ablkcipher;
		struct blkcipher_tfm blkcipher;
		struct cipher_tfm cipher;
		struct compress_tfm compress;
	} crt_u;

	void (*exit)(struct crypto_tfm *tfm);

	struct crypto_alg *__crt_alg;

	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_ablkcipher {
	struct crypto_tfm base;
};

struct crypto_blkcipher {
	struct crypto_tfm base;
};

struct crypto_cipher {
	struct crypto_tfm base;
};

struct crypto_comp {
	struct crypto_tfm base;
};

enum {
	CRYPTOA_UNSPEC,
	CRYPTOA_ALG,
	CRYPTOA_TYPE,
	CRYPTOA_U32,
	__CRYPTOA_MAX,
};

#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)

/* Maximum number of (rtattr) parameters for each template. */
#define CRYPTO_MAX_ATTRS 32

struct crypto_attr_alg {
	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
	u32 type;
	u32 mask;
};

struct crypto_attr_u32 {
	u32 num;
};

/*
 * Transform user interface.
 */

struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
	return crypto_destroy_tfm(tfm, tfm);
}

int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
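
/*
 * Illustrative use of the generic allocation interface; most callers go
 * through the typed wrappers below rather than crypto_alloc_base() directly.
 * Allocating the single-block "aes" primitive, for example:
 *
 *	struct crypto_tfm *tfm;
 *
 *	tfm = crypto_alloc_base("aes", CRYPTO_ALG_TYPE_CIPHER,
 *				CRYPTO_ALG_TYPE_MASK);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_tfm(tfm);
 */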

/*
 * Transform helpers which query the underlying algorithm.
 */
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_name;
}

static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_driver_name;
}

static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_priority;
}

static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_blocksize;
}

static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_alignmask;
}

static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
	return tfm->crt_flags;
}

static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags |= flags;
}

static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags &= ~flags;
}

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
	return tfm->__crt_ctx;
}

static inline unsigned int crypto_tfm_ctx_alignment(void)
{
	struct crypto_tfm *tfm;
	return __alignof__(tfm->__crt_ctx);
}

/*
 * API wrappers.
 */
static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
	struct crypto_tfm *tfm)
{
	return (struct crypto_ablkcipher *)tfm;
}

static inline u32 crypto_skcipher_type(u32 type)
{
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	return type;
}

static inline u32 crypto_skcipher_mask(u32 mask)
{
	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
	return mask;
}
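
/*
 * crypto_tfm_ctx() returns the per-transform context area whose size was
 * requested through cra_ctxsize.  Hypothetical sketch of a driver reaching
 * its private state from a callback (a real setkey must also validate
 * keylen):
 *
 *	struct toy_ctx {
 *		u8 key[16];
 *	};
 *
 *	static int toy_setkey(struct crypto_tfm *tfm, const u8 *key,
 *			      unsigned int keylen)
 *	{
 *		struct toy_ctx *ctx = crypto_tfm_ctx(tfm);
 *
 *		memcpy(ctx->key, key, sizeof(ctx->key));
 *		return 0;
 *	}
 */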

/**
 * DOC: Asynchronous Block Cipher API
 *
 * Asynchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
 *
 * Asynchronous cipher operations imply that the function invocation for a
 * cipher request returns immediately before the completion of the operation.
 * The cipher request is scheduled as a separate kernel thread and therefore
 * load-balanced on the different CPUs via the process scheduler. To allow
 * the kernel crypto API to inform the caller about the completion of a cipher
 * request, the caller must provide a callback function. That function is
 * invoked with the cipher handle when the request completes.
 *
 * To support the asynchronous operation, more information than just the
 * cipher handle must be supplied to the kernel crypto API. That additional
 * information is given by filling in the ablkcipher_request data structure.
 *
 * For the asynchronous block cipher API, the state is maintained with the tfm
 * cipher handle. A single tfm can be used across multiple calls and in
 * parallel. For asynchronous block cipher calls, context data supplied and
 * only used by the caller can be referenced in the request data structure in
 * addition to the IV used for the cipher request. The maintenance of such
 * state information would be important for a crypto driver implementer to
 * have, because when calling the callback function upon completion of the
 * cipher operation, that callback function may need some information about
 * which operation just finished if it invoked multiple operations in
 * parallel. This state information is unused by the kernel crypto API.
 */

static inline struct crypto_tfm *crypto_ablkcipher_tfm(
	struct crypto_ablkcipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_ablkcipher() - zeroize and free cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
{
	crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
}

/**
 * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      ablkcipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the ablkcipher is known to the kernel crypto API; false
 *	   otherwise
 */
static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_has_alg(alg_name, crypto_skcipher_type(type),
			      crypto_skcipher_mask(mask));
}

static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
}

/**
 * crypto_ablkcipher_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the ablkcipher referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_ablkcipher_ivsize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_ablkcipher_crt(tfm)->ivsize;
}

/**
 * crypto_ablkcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the ablkcipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_ablkcipher_blocksize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
}

static inline unsigned int crypto_ablkcipher_alignmask(
	struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
}

static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
}

static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
					       u32 flags)
{
	crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
}

static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
						 u32 flags)
{
	crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
}
>> 823 * >> 824 * Return: 0 if the setting of the key was successful; < 0 if an error occurred >> 825 */ >> 826 static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, >> 827 const u8 *key, unsigned int keylen) >> 828 { >> 829 struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm); >> 830 >> 831 return crt->setkey(crt->base, key, keylen); >> 832 } >> 833 >> 834 /** >> 835 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request >> 836 * @req: ablkcipher_request out of which the cipher handle is to be obtained >> 837 * >> 838 * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request >> 839 * data structure. >> 840 * >> 841 * Return: crypto_ablkcipher handle >> 842 */ >> 843 static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( >> 844 struct ablkcipher_request *req) >> 845 { >> 846 return __crypto_ablkcipher_cast(req->base.tfm); >> 847 } >> 848 >> 849 /** >> 850 * crypto_ablkcipher_encrypt() - encrypt plaintext >> 851 * @req: reference to the ablkcipher_request handle that holds all information >> 852 * needed to perform the cipher operation >> 853 * >> 854 * Encrypt plaintext data using the ablkcipher_request handle. That data >> 855 * structure and how it is filled with data is discussed with the >> 856 * ablkcipher_request_* functions. >> 857 * >> 858 * Return: 0 if the cipher operation was successful; < 0 if an error occurred >> 859 */ >> 860 static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) >> 861 { >> 862 struct ablkcipher_tfm *crt = >> 863 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); >> 864 return crt->encrypt(req); >> 865 } >> 866 >> 867 /** >> 868 * crypto_ablkcipher_decrypt() - decrypt ciphertext >> 869 * @req: reference to the ablkcipher_request handle that holds all information >> 870 * needed to perform the cipher operation >> 871 * >> 872 * Decrypt ciphertext data using the ablkcipher_request handle. That data >> 873 * structure and how it is filled with data is discussed with the >> 874 * ablkcipher_request_* functions. >> 875 * >> 876 * Return: 0 if the cipher operation was successful; < 0 if an error occurred >> 877 */ >> 878 static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) >> 879 { >> 880 struct ablkcipher_tfm *crt = >> 881 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); >> 882 return crt->decrypt(req); >> 883 } >> 884 >> 885 /** >> 886 * DOC: Asynchronous Cipher Request Handle >> 887 * >> 888 * The ablkcipher_request data structure contains all pointers to data >> 889 * required for the asynchronous cipher operation. This includes the cipher >> 890 * handle (which can be used by multiple ablkcipher_request instances), pointer >> 891 * to plaintext and ciphertext, asynchronous callback function, etc. It acts >> 892 * as a handle to the ablkcipher_request_* API calls in a similar way as >> 893 * ablkcipher handle to the crypto_ablkcipher_* API calls. 
>> 894 */ >> 895 >> 896 /** >> 897 * crypto_ablkcipher_reqsize() - obtain size of the request data structure >> 898 * @tfm: cipher handle >> 899 * >> 900 * Return: number of bytes >> 901 */ >> 902 static inline unsigned int crypto_ablkcipher_reqsize( >> 903 struct crypto_ablkcipher *tfm) >> 904 { >> 905 return crypto_ablkcipher_crt(tfm)->reqsize; >> 906 } >> 907 >> 908 /** >> 909 * ablkcipher_request_set_tfm() - update cipher handle reference in request >> 910 * @req: request handle to be modified >> 911 * @tfm: cipher handle that shall be added to the request handle >> 912 * >> 913 * Allow the caller to replace the existing ablkcipher handle in the request >> 914 * data structure with a different one. >> 915 */ >> 916 static inline void ablkcipher_request_set_tfm( >> 917 struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) >> 918 { >> 919 req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base); >> 920 } >> 921 >> 922 static inline struct ablkcipher_request *ablkcipher_request_cast( >> 923 struct crypto_async_request *req) >> 924 { >> 925 return container_of(req, struct ablkcipher_request, base); >> 926 } >> 927 >> 928 /** >> 929 * ablkcipher_request_alloc() - allocate request data structure >> 930 * @tfm: cipher handle to be registered with the request >> 931 * @gfp: memory allocation flag that is handed to kmalloc by the API call. >> 932 * >> 933 * Allocate the request data structure that must be used with the ablkcipher >> 934 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher >> 935 * handle is registered in the request data structure. >> 936 * >> 937 * Return: allocated request handle in case of success, or NULL if out of memory >> 938 */ >> 939 static inline struct ablkcipher_request *ablkcipher_request_alloc( >> 940 struct crypto_ablkcipher *tfm, gfp_t gfp) >> 941 { >> 942 struct ablkcipher_request *req; >> 943 >> 944 req = kmalloc(sizeof(struct ablkcipher_request) + >> 945 crypto_ablkcipher_reqsize(tfm), gfp); >> 946 >> 947 if (likely(req)) >> 948 ablkcipher_request_set_tfm(req, tfm); >> 949 >> 950 return req; >> 951 } >> 952 >> 953 /** >> 954 * ablkcipher_request_free() - zeroize and free request data structure >> 955 * @req: request data structure cipher handle to be freed >> 956 */ >> 957 static inline void ablkcipher_request_free(struct ablkcipher_request *req) >> 958 { >> 959 kzfree(req); >> 960 } >> 961 >> 962 /** >> 963 * ablkcipher_request_set_callback() - set asynchronous callback function >> 964 * @req: request handle >> 965 * @flags: specify zero or an ORing of the flags >> 966 * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and >> 967 * increase the wait queue beyond the initial maximum size; >> 968 * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep >> 969 * @compl: callback function pointer to be registered with the request handle >> 970 * @data: The data pointer refers to memory that is not used by the kernel >> 971 * crypto API, but provided to the callback function for it to use. Here, >> 972 * the caller can provide a reference to memory the callback function can >> 973 * operate on. As the callback function is invoked asynchronously to the >> 974 * related functionality, it may need to access data structures of the >> 975 * related functionality which can be referenced using this pointer. The >> 976 * callback function can access the memory via the "data" field in the >> 977 * crypto_async_request data structure provided to the callback function. 
>> 978 * >> 979 * This function allows setting the callback function that is triggered once the >> 980 * cipher operation completes. >> 981 * >> 982 * The callback function is registered with the ablkcipher_request handle and >> 983 * must comply with the following template:: >> 984 * >> 985 * void callback_function(struct crypto_async_request *req, int error) >> 986 */ >> 987 static inline void ablkcipher_request_set_callback( >> 988 struct ablkcipher_request *req, >> 989 u32 flags, crypto_completion_t compl, void *data) >> 990 { >> 991 req->base.complete = compl; >> 992 req->base.data = data; >> 993 req->base.flags = flags; >> 994 } >> 995 >> 996 /** >> 997 * ablkcipher_request_set_crypt() - set data buffers >> 998 * @req: request handle >> 999 * @src: source scatter / gather list >> 1000 * @dst: destination scatter / gather list >> 1001 * @nbytes: number of bytes to process from @src >> 1002 * @iv: IV for the cipher operation which must comply with the IV size defined >> 1003 * by crypto_ablkcipher_ivsize >> 1004 * >> 1005 * This function allows setting of the source data and destination data >> 1006 * scatter / gather lists. >> 1007 * >> 1008 * For encryption, the source is treated as the plaintext and the >> 1009 * destination is the ciphertext. For a decryption operation, the use is >> 1010 * reversed - the source is the ciphertext and the destination is the plaintext. >> 1011 */ >> 1012 static inline void ablkcipher_request_set_crypt( >> 1013 struct ablkcipher_request *req, >> 1014 struct scatterlist *src, struct scatterlist *dst, >> 1015 unsigned int nbytes, void *iv) >> 1016 { >> 1017 req->src = src; >> 1018 req->dst = dst; >> 1019 req->nbytes = nbytes; >> 1020 req->info = iv; >> 1021 } >> 1022 >> 1023 /** >> 1024 * DOC: Synchronous Block Cipher API >> 1025 * >> 1026 * The synchronous block cipher API is used with the ciphers of type >> 1027 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto) >> 1028 * >> 1029 * Synchronous calls, have a context in the tfm. But since a single tfm can be >> 1030 * used in multiple calls and in parallel, this info should not be changeable >> 1031 * (unless a lock is used). This applies, for example, to the symmetric key. >> 1032 * However, the IV is changeable, so there is an iv field in blkcipher_tfm >> 1033 * structure for synchronous blkcipher api. So, its the only state info that can >> 1034 * be kept for synchronous calls without using a big lock across a tfm. >> 1035 * >> 1036 * The block cipher API allows the use of a complete cipher, i.e. a cipher >> 1037 * consisting of a template (a block chaining mode) and a single block cipher >> 1038 * primitive (e.g. AES). >> 1039 * >> 1040 * The plaintext data buffer and the ciphertext data buffer are pointed to >> 1041 * by using scatter/gather lists. The cipher operation is performed >> 1042 * on all segments of the provided scatter/gather lists. >> 1043 * >> 1044 * The kernel crypto API supports a cipher operation "in-place" which means that >> 1045 * the caller may provide the same scatter/gather list for the plaintext and >> 1046 * cipher text. After the completion of the cipher operation, the plaintext >> 1047 * data is replaced with the ciphertext data in case of an encryption and vice >> 1048 * versa for a decryption. The caller must ensure that the scatter/gather lists >> 1049 * for the output data point to sufficiently large buffers, i.e. multiples of >> 1050 * the block size of the cipher. 
>> 1051 */ >> 1052 >> 1053 static inline struct crypto_blkcipher *__crypto_blkcipher_cast( >> 1054 struct crypto_tfm *tfm) >> 1055 { >> 1056 return (struct crypto_blkcipher *)tfm; >> 1057 } >> 1058 >> 1059 static inline struct crypto_blkcipher *crypto_blkcipher_cast( >> 1060 struct crypto_tfm *tfm) >> 1061 { >> 1062 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER); >> 1063 return __crypto_blkcipher_cast(tfm); >> 1064 } >> 1065 >> 1066 /** >> 1067 * crypto_alloc_blkcipher() - allocate synchronous block cipher handle >> 1068 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the >> 1069 * blkcipher cipher >> 1070 * @type: specifies the type of the cipher >> 1071 * @mask: specifies the mask for the cipher >> 1072 * >> 1073 * Allocate a cipher handle for a block cipher. The returned struct >> 1074 * crypto_blkcipher is the cipher handle that is required for any subsequent >> 1075 * API invocation for that block cipher. >> 1076 * >> 1077 * Return: allocated cipher handle in case of success; IS_ERR() is true in case >> 1078 * of an error, PTR_ERR() returns the error code. >> 1079 */ >> 1080 static inline struct crypto_blkcipher *crypto_alloc_blkcipher( >> 1081 const char *alg_name, u32 type, u32 mask) >> 1082 { >> 1083 type &= ~CRYPTO_ALG_TYPE_MASK; >> 1084 type |= CRYPTO_ALG_TYPE_BLKCIPHER; >> 1085 mask |= CRYPTO_ALG_TYPE_MASK; >> 1086 >> 1087 return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask)); >> 1088 } >> 1089 >> 1090 static inline struct crypto_tfm *crypto_blkcipher_tfm( >> 1091 struct crypto_blkcipher *tfm) >> 1092 { >> 1093 return &tfm->base; >> 1094 } >> 1095 >> 1096 /** >> 1097 * crypto_free_blkcipher() - zeroize and free the block cipher handle >> 1098 * @tfm: cipher handle to be freed >> 1099 */ >> 1100 static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) >> 1101 { >> 1102 crypto_free_tfm(crypto_blkcipher_tfm(tfm)); >> 1103 } >> 1104 >> 1105 /** >> 1106 * crypto_has_blkcipher() - Search for the availability of a block cipher >> 1107 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the >> 1108 * block cipher >> 1109 * @type: specifies the type of the cipher >> 1110 * @mask: specifies the mask for the cipher >> 1111 * >> 1112 * Return: true when the block cipher is known to the kernel crypto API; false >> 1113 * otherwise >> 1114 */ >> 1115 static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) >> 1116 { >> 1117 type &= ~CRYPTO_ALG_TYPE_MASK; >> 1118 type |= CRYPTO_ALG_TYPE_BLKCIPHER; >> 1119 mask |= CRYPTO_ALG_TYPE_MASK; >> 1120 >> 1121 return crypto_has_alg(alg_name, type, mask); >> 1122 } >> 1123 >> 1124 /** >> 1125 * crypto_blkcipher_name() - return the name / cra_name from the cipher handle >> 1126 * @tfm: cipher handle >> 1127 * >> 1128 * Return: The character string holding the name of the cipher >> 1129 */ >> 1130 static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) >> 1131 { >> 1132 return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); >> 1133 } >> 1134 >> 1135 static inline struct blkcipher_tfm *crypto_blkcipher_crt( >> 1136 struct crypto_blkcipher *tfm) >> 1137 { >> 1138 return &crypto_blkcipher_tfm(tfm)->crt_blkcipher; >> 1139 } >> 1140 >> 1141 static inline struct blkcipher_alg *crypto_blkcipher_alg( >> 1142 struct crypto_blkcipher *tfm) >> 1143 { >> 1144 return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; >> 1145 } >> 1146 >> 1147 /** >> 1148 * crypto_blkcipher_ivsize() - obtain IV size >> 1149 * @tfm: cipher handle >> 
1150 * >> 1151 * The size of the IV for the block cipher referenced by the cipher handle is >> 1152 * returned. This IV size may be zero if the cipher does not need an IV. >> 1153 * >> 1154 * Return: IV size in bytes >> 1155 */ >> 1156 static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) >> 1157 { >> 1158 return crypto_blkcipher_alg(tfm)->ivsize; >> 1159 } >> 1160 >> 1161 /** >> 1162 * crypto_blkcipher_blocksize() - obtain block size of cipher >> 1163 * @tfm: cipher handle >> 1164 * >> 1165 * The block size for the block cipher referenced with the cipher handle is >> 1166 * returned. The caller may use that information to allocate appropriate >> 1167 * memory for the data returned by the encryption or decryption operation. >> 1168 * >> 1169 * Return: block size of cipher >> 1170 */ >> 1171 static inline unsigned int crypto_blkcipher_blocksize( >> 1172 struct crypto_blkcipher *tfm) >> 1173 { >> 1174 return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm)); >> 1175 } >> 1176 >> 1177 static inline unsigned int crypto_blkcipher_alignmask( >> 1178 struct crypto_blkcipher *tfm) >> 1179 { >> 1180 return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm)); >> 1181 } >> 1182 >> 1183 static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm) >> 1184 { >> 1185 return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm)); >> 1186 } >> 1187 >> 1188 static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm, >> 1189 u32 flags) >> 1190 { >> 1191 crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags); >> 1192 } >> 1193 >> 1194 static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm, >> 1195 u32 flags) >> 1196 { >> 1197 crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); >> 1198 } >> 1199 >> 1200 /** >> 1201 * crypto_blkcipher_setkey() - set key for cipher >> 1202 * @tfm: cipher handle >> 1203 * @key: buffer holding the key >> 1204 * @keylen: length of the key in bytes >> 1205 * >> 1206 * The caller provided key is set for the block cipher referenced by the cipher >> 1207 * handle. >> 1208 * >> 1209 * Note, the key length determines the cipher type. Many block ciphers implement >> 1210 * different cipher modes depending on the key size, such as AES-128 vs AES-192 >> 1211 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 >> 1212 * is performed. >> 1213 * >> 1214 * Return: 0 if the setting of the key was successful; < 0 if an error occurred >> 1215 */ >> 1216 static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, >> 1217 const u8 *key, unsigned int keylen) >> 1218 { >> 1219 return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm), >> 1220 key, keylen); >> 1221 } >> 1222 >> 1223 /** >> 1224 * crypto_blkcipher_encrypt() - encrypt plaintext >> 1225 * @desc: reference to the block cipher handle with meta data >> 1226 * @dst: scatter/gather list that is filled by the cipher operation with the >> 1227 * ciphertext >> 1228 * @src: scatter/gather list that holds the plaintext >> 1229 * @nbytes: number of bytes of the plaintext to encrypt. >> 1230 * >> 1231 * Encrypt plaintext data using the IV set by the caller with a preceding >> 1232 * call of crypto_blkcipher_set_iv. >> 1233 * >> 1234 * The blkcipher_desc data structure must be filled by the caller and can >> 1235 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled >> 1236 * with the block cipher handle; desc.flags is filled with either >> 1237 * CRYPTO_TFM_REQ_MAY_SLEEP or 0. 
>> 1238 * >> 1239 * Return: 0 if the cipher operation was successful; < 0 if an error occurred >> 1240 */ >> 1241 static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, >> 1242 struct scatterlist *dst, >> 1243 struct scatterlist *src, >> 1244 unsigned int nbytes) >> 1245 { >> 1246 desc->info = crypto_blkcipher_crt(desc->tfm)->iv; >> 1247 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); >> 1248 } >> 1249 >> 1250 /** >> 1251 * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV >> 1252 * @desc: reference to the block cipher handle with meta data >> 1253 * @dst: scatter/gather list that is filled by the cipher operation with the >> 1254 * ciphertext >> 1255 * @src: scatter/gather list that holds the plaintext >> 1256 * @nbytes: number of bytes of the plaintext to encrypt. >> 1257 * >> 1258 * Encrypt plaintext data with the use of an IV that is solely used for this >> 1259 * cipher operation. Any previously set IV is not used. >> 1260 * >> 1261 * The blkcipher_desc data structure must be filled by the caller and can >> 1262 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled >> 1263 * with the block cipher handle; desc.info is filled with the IV to be used for >> 1264 * the current operation; desc.flags is filled with either >> 1265 * CRYPTO_TFM_REQ_MAY_SLEEP or 0. >> 1266 * >> 1267 * Return: 0 if the cipher operation was successful; < 0 if an error occurred >> 1268 */ >> 1269 static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, >> 1270 struct scatterlist *dst, >> 1271 struct scatterlist *src, >> 1272 unsigned int nbytes) >> 1273 { >> 1274 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); >> 1275 } >> 1276 >> 1277 /** >> 1278 * crypto_blkcipher_decrypt() - decrypt ciphertext >> 1279 * @desc: reference to the block cipher handle with meta data >> 1280 * @dst: scatter/gather list that is filled by the cipher operation with the >> 1281 * plaintext >> 1282 * @src: scatter/gather list that holds the ciphertext >> 1283 * @nbytes: number of bytes of the ciphertext to decrypt. >> 1284 * >> 1285 * Decrypt ciphertext data using the IV set by the caller with a preceding >> 1286 * call of crypto_blkcipher_set_iv. >> 1287 * >> 1288 * The blkcipher_desc data structure must be filled by the caller as documented >> 1289 * for the crypto_blkcipher_encrypt call above. >> 1290 * >> 1291 * Return: 0 if the cipher operation was successful; < 0 if an error occurred >> 1292 * >> 1293 */ >> 1294 static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, >> 1295 struct scatterlist *dst, >> 1296 struct scatterlist *src, >> 1297 unsigned int nbytes) >> 1298 { >> 1299 desc->info = crypto_blkcipher_crt(desc->tfm)->iv; >> 1300 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); >> 1301 } >> 1302 >> 1303 /** >> 1304 * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV >> 1305 * @desc: reference to the block cipher handle with meta data >> 1306 * @dst: scatter/gather list that is filled by the cipher operation with the >> 1307 * plaintext >> 1308 * @src: scatter/gather list that holds the ciphertext >> 1309 * @nbytes: number of bytes of the ciphertext to decrypt. >> 1310 * >> 1311 * Decrypt ciphertext data with the use of an IV that is solely used for this >> 1312 * cipher operation. Any previously set IV is not used. 
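
/*
 * Editor's example, a minimal sketch that is not part of the original
 * header: one-shot in-place CBC-AES encryption with the synchronous block
 * cipher API above. The algorithm name "cbc(aes)", the zeroed key/IV and
 * the single-block buffer are illustrative assumptions only; real callers
 * must use proper key material and an unpredictable IV, and the buffer is
 * assumed to live in linearly mapped memory so that sg_init_one() (from
 * <linux/scatterlist.h>) may reference it.
 *
 *      static int example_blkcipher_encrypt(void)
 *      {
 *              struct crypto_blkcipher *tfm;
 *              struct blkcipher_desc desc;
 *              struct scatterlist sg;
 *              u8 key[16] = { 0 };
 *              u8 iv[16] = { 0 };
 *              u8 buf[16] = { 0 };
 *              int err;
 *
 *              tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
 *              if (IS_ERR(tfm))
 *                      return PTR_ERR(tfm);
 *
 *              err = crypto_blkcipher_setkey(tfm, key, sizeof(key));
 *              if (err)
 *                      goto out;
 *              crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
 *
 *              sg_init_one(&sg, buf, sizeof(buf));
 *              desc.tfm = tfm;
 *              desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *              err = crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf));
 *      out:
 *              crypto_free_blkcipher(tfm);
 *              return err;
 *      }
 *
 * Passing CRYPTO_ALG_ASYNC in the mask with type 0 requests a purely
 * synchronous implementation, so the call completes before returning.
 */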

/**
 * DOC: Single Block Cipher API
 *
 * The single block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
 *
 * Using the single block cipher API calls, operations with the basic cipher
 * primitive can be implemented. These cipher primitives exclude any block
 * chaining operations including IV handling.
 *
 * The purpose of this single block cipher API is to support the implementation
 * of templates or other concepts that only need to perform the cipher operation
 * on one block at a time. Templates invoke the underlying cipher primitive
 * block-wise and process either the input or the output data of these cipher
 * operations.
 */

static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
{
        return (struct crypto_cipher *)tfm;
}

static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
{
        BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
        return __crypto_cipher_cast(tfm);
}

/**
 * crypto_alloc_cipher() - allocate single block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *            single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a single block cipher. The returned struct
 * crypto_cipher is the cipher handle that is required for any subsequent API
 * invocation for that single block cipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *         of an error, PTR_ERR() returns the error code.
 */
static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
                                                        u32 type, u32 mask)
{
        type &= ~CRYPTO_ALG_TYPE_MASK;
        type |= CRYPTO_ALG_TYPE_CIPHER;
        mask |= CRYPTO_ALG_TYPE_MASK;

        return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
{
        return &tfm->base;
}

/**
 * crypto_free_cipher() - zeroize and free the single block cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_cipher(struct crypto_cipher *tfm)
{
        crypto_free_tfm(crypto_cipher_tfm(tfm));
}

/**
 * crypto_has_cipher() - Search for the availability of a single block cipher
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *            single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the single block cipher is known to the kernel crypto API;
 *         false otherwise
 */
static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
{
        type &= ~CRYPTO_ALG_TYPE_MASK;
        type |= CRYPTO_ALG_TYPE_CIPHER;
        mask |= CRYPTO_ALG_TYPE_MASK;

        return crypto_has_alg(alg_name, type, mask);
}

static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
{
        return &crypto_cipher_tfm(tfm)->crt_cipher;
}

/**
 * crypto_cipher_blocksize() - obtain block size for cipher
 * @tfm: cipher handle
 *
 * The block size for the single block cipher referenced with the cipher handle
 * tfm is returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
{
        return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
}

static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
{
        return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
}

static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
{
        return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
}

static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
                                           u32 flags)
{
        crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
}

static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
                                             u32 flags)
{
        crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
}

/**
 * crypto_cipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the single block cipher referenced by the
 * cipher handle.
 *
 * Note that the key length selects the cipher variant. Many block ciphers
 * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256: when a
 * 16-byte key is provided for an AES cipher handle, AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
                                       const u8 *key, unsigned int keylen)
{
        return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
                                                  key, keylen);
}

/**
 * crypto_cipher_encrypt_one() - encrypt one block of plaintext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the ciphertext
 * @src: buffer holding the plaintext to be encrypted
 *
 * Invoke the encryption operation of one block. The caller must ensure that
 * the plaintext and ciphertext buffers are at least one block in size.
 */
static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
                                             u8 *dst, const u8 *src)
{
        crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
                                                dst, src);
}

/**
 * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the plaintext
 * @src: buffer holding the ciphertext to be decrypted
 *
 * Invoke the decryption operation of one block. The caller must ensure that
 * the plaintext and ciphertext buffers are at least one block in size.
 */
static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
                                             u8 *dst, const u8 *src)
{
        crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
                                                dst, src);
}
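
/*
 * Editor's example, a minimal sketch that is not part of the original
 * header: encrypting exactly one 16-byte block with the single block
 * cipher API. The algorithm name "aes" and the zeroed key are illustrative
 * assumptions; real callers must supply proper key material.
 *
 *      static int example_cipher_one_block(u8 *out, const u8 *in)
 *      {
 *              struct crypto_cipher *tfm;
 *              u8 key[16] = { 0 };
 *              int err;
 *
 *              tfm = crypto_alloc_cipher("aes", 0, 0);
 *              if (IS_ERR(tfm))
 *                      return PTR_ERR(tfm);
 *
 *              err = crypto_cipher_setkey(tfm, key, sizeof(key));
 *              if (!err)
 *                      crypto_cipher_encrypt_one(tfm, out, in);
 *
 *              crypto_free_cipher(tfm);
 *              return err;
 *      }
 *
 * This primitive processes a single block with no chaining and no IV;
 * templates such as "cbc(aes)" build full cipher modes on top of it.
 */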

static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
        return (struct crypto_comp *)tfm;
}

static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
{
        BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
               CRYPTO_ALG_TYPE_MASK);
        return __crypto_comp_cast(tfm);
}

static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
                                                    u32 type, u32 mask)
{
        type &= ~CRYPTO_ALG_TYPE_MASK;
        type |= CRYPTO_ALG_TYPE_COMPRESS;
        mask |= CRYPTO_ALG_TYPE_MASK;

        return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
{
        return &tfm->base;
}

static inline void crypto_free_comp(struct crypto_comp *tfm)
{
        crypto_free_tfm(crypto_comp_tfm(tfm));
}

static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
{
        type &= ~CRYPTO_ALG_TYPE_MASK;
        type |= CRYPTO_ALG_TYPE_COMPRESS;
        mask |= CRYPTO_ALG_TYPE_MASK;

        return crypto_has_alg(alg_name, type, mask);
}

static inline const char *crypto_comp_name(struct crypto_comp *tfm)
{
        return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}

static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
{
        return &crypto_comp_tfm(tfm)->crt_compress;
}

static inline int crypto_comp_compress(struct crypto_comp *tfm,
                                       const u8 *src, unsigned int slen,
                                       u8 *dst, unsigned int *dlen)
{
        return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
                                                  src, slen, dst, dlen);
}

static inline int crypto_comp_decompress(struct crypto_comp *tfm,
                                         const u8 *src, unsigned int slen,
                                         u8 *dst, unsigned int *dlen)
{
        return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
                                                    src, slen, dst, dlen);
}
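
/*
 * Editor's example, a minimal sketch that is not part of the original
 * header: one-shot compression with the synchronous compression API. The
 * algorithm name "deflate" is an illustrative assumption; *dlen must be
 * initialized to the size of the destination buffer and is updated by the
 * call to the actual output length.
 *
 *      static int example_compress(const u8 *src, unsigned int slen,
 *                                  u8 *dst, unsigned int *dlen)
 *      {
 *              struct crypto_comp *tfm;
 *              int err;
 *
 *              tfm = crypto_alloc_comp("deflate", 0, 0);
 *              if (IS_ERR(tfm))
 *                      return PTR_ERR(tfm);
 *
 *              err = crypto_comp_compress(tfm, src, slen, dst, dlen);
 *
 *              crypto_free_comp(tfm);
 *              return err;
 *      }
 */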

#endif  /* _LINUX_CRYPTO_H */