// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the implementation of the ahash (asynchronous hash) API.  It differs
 * from shash (synchronous hash) in that ahash supports asynchronous operations,
 * and it hashes data from scatterlists instead of virtually addressed buffers.
 *
 * The ahash API provides access to both ahash and shash algorithms.  The shash
 * API only provides access to shash algorithms.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "hash.h"

#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e

/*
 * For an ahash tfm that is using an shash algorithm (instead of an ahash
 * algorithm), this returns the underlying shash tfm.
 */
static inline struct crypto_shash *ahash_to_shash(struct crypto_ahash *tfm)
{
        return *(struct crypto_shash **)crypto_ahash_ctx(tfm);
}

static inline struct shash_desc *prepare_shash_desc(struct ahash_request *req,
                                                    struct crypto_ahash *tfm)
{
        struct shash_desc *desc = ahash_request_ctx(req);

        desc->tfm = ahash_to_shash(tfm);
        return desc;
}

int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
{
        struct crypto_hash_walk walk;
        int nbytes;

        for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
             nbytes = crypto_hash_walk_done(&walk, nbytes))
                nbytes = crypto_shash_update(desc, walk.data, nbytes);

        return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_update);
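
/*
 * The loop in shash_ahash_update() is the standard hash walk pattern:
 * crypto_hash_walk_first() maps the first chunk of the source scatterlist
 * (at most one page) at walk.data, and crypto_hash_walk_done() takes the
 * error from processing that chunk -- on success it unmaps the chunk and
 * returns the size of the next one, on failure it ends the walk and
 * returns the error.
 */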

int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
{
        struct crypto_hash_walk walk;
        int nbytes;

        nbytes = crypto_hash_walk_first(req, &walk);
        if (!nbytes)
                return crypto_shash_final(desc, req->result);

        do {
                nbytes = crypto_hash_walk_last(&walk) ?
                         crypto_shash_finup(desc, walk.data, nbytes,
                                            req->result) :
                         crypto_shash_update(desc, walk.data, nbytes);
                nbytes = crypto_hash_walk_done(&walk, nbytes);
        } while (nbytes > 0);

        return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_finup);

int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
        unsigned int nbytes = req->nbytes;
        struct scatterlist *sg;
        unsigned int offset;
        int err;

        if (nbytes &&
            (sg = req->src, offset = sg->offset,
             nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
                void *data;

                data = kmap_local_page(sg_page(sg));
                err = crypto_shash_digest(desc, data + offset, nbytes,
                                          req->result);
                kunmap_local(data);
        } else
                err = crypto_shash_init(desc) ?:
                      shash_ahash_finup(req, desc);

        return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_digest);

static void crypto_exit_ahash_using_shash(struct crypto_tfm *tfm)
{
        struct crypto_shash **ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(*ctx);
}

static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
{
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
        struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *shash;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        shash = crypto_create_tfm(calg, &crypto_shash_type);
        if (IS_ERR(shash)) {
                crypto_mod_put(calg);
                return PTR_ERR(shash);
        }

        crt->using_shash = true;
        *ctx = shash;
        tfm->exit = crypto_exit_ahash_using_shash;

        crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
                                    CRYPTO_TFM_NEED_KEY);
        crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);

        return 0;
}
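
/*
 * When an ahash tfm wraps an shash algorithm this way, the request context
 * of every ahash_request is simply a struct shash_desc followed by the
 * shash's descriptor state (hence the reqsize computation above); that is
 * the descriptor prepare_shash_desc() hands to the shash entry points.
 */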

static int hash_walk_next(struct crypto_hash_walk *walk)
{
        unsigned int offset = walk->offset;
        unsigned int nbytes = min(walk->entrylen,
                                  ((unsigned int)(PAGE_SIZE)) - offset);

        walk->data = kmap_local_page(walk->pg);
        walk->data += offset;
        walk->entrylen -= nbytes;
        return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
        struct scatterlist *sg;

        sg = walk->sg;
        walk->offset = sg->offset;
        walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
        walk->offset = offset_in_page(walk->offset);
        walk->entrylen = sg->length;

        if (walk->entrylen > walk->total)
                walk->entrylen = walk->total;
        walk->total -= walk->entrylen;

        return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
        walk->data -= walk->offset;

        kunmap_local(walk->data);
        crypto_yield(walk->flags);

        if (err)
                return err;

        if (walk->entrylen) {
                walk->offset = 0;
                walk->pg++;
                return hash_walk_next(walk);
        }

        if (!walk->total)
                return 0;

        walk->sg = sg_next(walk->sg);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
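
/*
 * Sketch of how a driver's ->update() might consume a request with this
 * walk API (hypothetical driver code; the "mydrv_*" names are illustrative
 * only and not part of this file):
 *
 *	static int mydrv_update(struct ahash_request *req)
 *	{
 *		struct crypto_hash_walk walk;
 *		int nbytes;
 *
 *		for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *		     nbytes = crypto_hash_walk_done(&walk, nbytes))
 *			nbytes = mydrv_consume(req, walk.data, nbytes);
 *
 *		return nbytes;
 *	}
 *
 * where mydrv_consume() returns 0 on success or a negative error, exactly
 * like crypto_shash_update() in shash_ahash_update() above.
 */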

int crypto_hash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->sg = req->src;
        walk->flags = req->base.flags;

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int keylen)
{
        return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm, struct ahash_alg *alg)
{
        if (alg->setkey != ahash_nosetkey &&
            !(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
                crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
        if (likely(tfm->using_shash)) {
                struct crypto_shash *shash = ahash_to_shash(tfm);
                int err;

                err = crypto_shash_setkey(shash, key, keylen);
                if (unlikely(err)) {
                        crypto_ahash_set_flags(tfm,
                                               crypto_shash_get_flags(shash) &
                                               CRYPTO_TFM_NEED_KEY);
                        return err;
                }
        } else {
                struct ahash_alg *alg = crypto_ahash_alg(tfm);
                int err;

                err = alg->setkey(tfm, key, keylen);
                if (unlikely(err)) {
                        ahash_set_needkey(tfm, alg);
                        return err;
                }
        }
        crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

int crypto_ahash_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

        if (likely(tfm->using_shash))
                return crypto_shash_init(prepare_shash_desc(req, tfm));
        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                return -ENOKEY;
        return crypto_ahash_alg(tfm)->init(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_init);
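
/*
 * Typical caller flow (illustrative sketch only; error handling trimmed,
 * assumes some "sha256" implementation is registered):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	u8 digest[SHA256_DIGEST_SIZE];
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	int err;
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */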

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
                          bool has_state)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned int ds = crypto_ahash_digestsize(tfm);
        struct ahash_request *subreq;
        unsigned int subreq_size;
        unsigned int reqsize;
        u8 *result;
        gfp_t gfp;
        u32 flags;

        subreq_size = sizeof(*subreq);
        reqsize = crypto_ahash_reqsize(tfm);
        reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
        subreq_size += reqsize;
        subreq_size += ds;

        flags = ahash_request_flags(req);
        gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
        subreq = kmalloc(subreq_size, gfp);
        if (!subreq)
                return -ENOMEM;

        ahash_request_set_tfm(subreq, tfm);
        ahash_request_set_callback(subreq, flags, cplt, req);

        result = (u8 *)(subreq + 1) + reqsize;

        ahash_request_set_crypt(subreq, req->src, result, req->nbytes);

        if (has_state) {
                void *state;

                state = kmalloc(crypto_ahash_statesize(tfm), gfp);
                if (!state) {
                        kfree(subreq);
                        return -ENOMEM;
                }

                crypto_ahash_export(req, state);
                crypto_ahash_import(subreq, state);
                kfree_sensitive(state);
        }

        req->priv = subreq;

        return 0;
}
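
/*
 * ahash_save_req() backs the default finup path below: it clones the
 * original request into a heap-allocated subrequest whose result buffer
 * sits right behind it, optionally carries the hash state over via
 * export/import, and parks the subrequest in req->priv until
 * ahash_restore_req() copies the digest back and frees it.
 */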

static void ahash_restore_req(struct ahash_request *req, int err)
{
        struct ahash_request *subreq = req->priv;

        if (!err)
                memcpy(req->result, subreq->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        req->priv = NULL;

        kfree_sensitive(subreq);
}

int crypto_ahash_update(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

        if (likely(tfm->using_shash))
                return shash_ahash_update(req, ahash_request_ctx(req));

        return crypto_ahash_alg(tfm)->update(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_update);

int crypto_ahash_final(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

        if (likely(tfm->using_shash))
                return crypto_shash_final(ahash_request_ctx(req), req->result);

        return crypto_ahash_alg(tfm)->final(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
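
/*
 * For callers, finup is update plus final in one call, and digest is init
 * plus finup in one call; drivers that do not implement ->finup get
 * ahash_def_finup() below wired in at registration time.
 */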

int crypto_ahash_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

        if (likely(tfm->using_shash))
                return shash_ahash_finup(req, ahash_request_ctx(req));

        return crypto_ahash_alg(tfm)->finup(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

        if (likely(tfm->using_shash))
                return shash_ahash_digest(req, prepare_shash_desc(req, tfm));

        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                return -ENOKEY;

        return crypto_ahash_alg(tfm)->digest(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_done2(void *data, int err)
{
        struct ahash_request *areq = data;

        if (err == -EINPROGRESS)
                return;

        ahash_restore_req(areq, err);

        ahash_request_complete(areq, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
        struct ahash_request *subreq = req->priv;

        if (err)
                goto out;

        subreq->base.complete = ahash_def_finup_done2;

        err = crypto_ahash_alg(crypto_ahash_reqtfm(req))->final(subreq);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

out:
        ahash_restore_req(req, err);
        return err;
}

static void ahash_def_finup_done1(void *data, int err)
{
        struct ahash_request *areq = data;
        struct ahash_request *subreq;

        if (err == -EINPROGRESS)
                goto out;

        subreq = areq->priv;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

        err = ahash_def_finup_finish1(areq, err);
        if (err == -EINPROGRESS || err == -EBUSY)
                return;

out:
        ahash_request_complete(areq, err);
}
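
/*
 * The default finup runs as a two-step completion chain: ahash_def_finup()
 * issues ->update() on the saved subrequest, ahash_def_finup_done1() fires
 * when that completes asynchronously and chains ->final() through
 * ahash_def_finup_finish1(), and ahash_def_finup_done2() finally restores
 * and completes the original request.
 */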

static int ahash_def_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int err;

        err = ahash_save_req(req, ahash_def_finup_done1, true);
        if (err)
                return err;

        err = crypto_ahash_alg(tfm)->update(req->priv);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

        return ahash_def_finup_finish1(req, err);
}

int crypto_ahash_export(struct ahash_request *req, void *out)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

        if (likely(tfm->using_shash))
                return crypto_shash_export(ahash_request_ctx(req), out);
        return crypto_ahash_alg(tfm)->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_ahash_export);

int crypto_ahash_import(struct ahash_request *req, const void *in)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

        if (likely(tfm->using_shash))
                return crypto_shash_import(prepare_shash_desc(req, tfm), in);
        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                return -ENOKEY;
        return crypto_ahash_alg(tfm)->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_ahash_import);

static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(hash);

        alg->exit_tfm(hash);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(hash);

        crypto_ahash_set_statesize(hash, alg->halg.statesize);

        if (tfm->__crt_alg->cra_type == &crypto_shash_type)
                return crypto_init_ahash_using_shash(tfm);

        ahash_set_needkey(hash, alg);

        if (alg->exit_tfm)
                tfm->exit = crypto_ahash_exit_tfm;

        return alg->init_tfm ? alg->init_tfm(hash) : 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type == &crypto_shash_type)
                return sizeof(struct crypto_shash *);

        return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
        struct ahash_instance *ahash = ahash_instance(inst);

        ahash->free(ahash);
}

static int __maybe_unused crypto_ahash_report(
        struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_hash rhash;

        memset(&rhash, 0, sizeof(rhash));

        strscpy(rhash.type, "ahash", sizeof(rhash.type));

        rhash.blocksize = alg->cra_blocksize;
        rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

        return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : ahash\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "digestsize   : %u\n",
                   __crypto_hash_alg_common(alg)->digestsize);
}

static const struct crypto_type crypto_ahash_type = {
        .extsize = crypto_ahash_extsize,
        .init_tfm = crypto_ahash_init_tfm,
        .free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
        .show = crypto_ahash_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
        .report = crypto_ahash_report,
#endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
        .type = CRYPTO_ALG_TYPE_AHASH,
        .tfmsize = offsetof(struct crypto_ahash, base),
};
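
/*
 * crypto_ahash_type is the frontend glue consumed by the allocation and
 * template helpers below (crypto_grab_ahash(), crypto_alloc_ahash(),
 * crypto_has_ahash()).
 */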

int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
                      struct crypto_instance *inst,
                      const char *name, u32 type, u32 mask)
{
        spawn->base.frontend = &crypto_ahash_type;
        return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
        return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
        struct crypto_alg *alg = &halg->base;

        if (alg->cra_type == &crypto_shash_type)
                return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

        return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey;
}

struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
{
        struct hash_alg_common *halg = crypto_hash_alg_common(hash);
        struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
        struct crypto_ahash *nhash;
        struct ahash_alg *alg;
        int err;

        if (!crypto_hash_alg_has_setkey(halg)) {
                tfm = crypto_tfm_get(tfm);
                if (IS_ERR(tfm))
                        return ERR_CAST(tfm);

                return hash;
        }

        nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);

        if (IS_ERR(nhash))
                return nhash;

        nhash->reqsize = hash->reqsize;
        nhash->statesize = hash->statesize;

        if (likely(hash->using_shash)) {
                struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
                struct crypto_shash *shash;

                shash = crypto_clone_shash(ahash_to_shash(hash));
                if (IS_ERR(shash)) {
                        err = PTR_ERR(shash);
                        goto out_free_nhash;
                }
                nhash->using_shash = true;
                *nctx = shash;
                return nhash;
        }

        err = -ENOSYS;
        alg = crypto_ahash_alg(hash);
        if (!alg->clone_tfm)
                goto out_free_nhash;

        err = alg->clone_tfm(nhash, hash);
        if (err)
                goto out_free_nhash;

        return nhash;

out_free_nhash:
        crypto_free_ahash(nhash);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_clone_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;
        int err;

        if (alg->halg.statesize == 0)
                return -EINVAL;

        err = hash_prepare_alg(&alg->halg);
        if (err)
                return err;

        base->cra_type = &crypto_ahash_type;
        base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

        if (!alg->finup)
                alg->finup = ahash_def_finup;
        if (!alg->setkey)
                alg->setkey = ahash_nosetkey;

        return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;
        int err;

        err = ahash_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

void crypto_unregister_ahash(struct ahash_alg *alg)
{
        crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_ahash(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (--i; i >= 0; --i)
                crypto_unregister_ahash(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
                            struct ahash_instance *inst)
{
        int err;

        if (WARN_ON(!inst->free))
                return -EINVAL;

        err = ahash_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);
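
/*
 * Registration sketch for a standalone driver (hypothetical "mydrv" names,
 * illustrative only; a real driver also implements the referenced entry
 * points, and if ->finup is left NULL ahash_prepare_alg() fills in
 * ahash_def_finup()):
 *
 *	static struct ahash_alg mydrv_sha256_alg = {
 *		.init	= mydrv_init,
 *		.update	= mydrv_update,
 *		.final	= mydrv_final,
 *		.digest	= mydrv_digest,
 *		.export	= mydrv_export,
 *		.import	= mydrv_import,
 *		.halg	= {
 *			.digestsize	= SHA256_DIGEST_SIZE,
 *			.statesize	= sizeof(struct mydrv_state),
 *			.base		= {
 *				.cra_name	 = "sha256",
 *				.cra_driver_name = "sha256-mydrv",
 *				.cra_priority	 = 300,
 *				.cra_flags	 = CRYPTO_ALG_ASYNC,
 *				.cra_blocksize	 = SHA256_BLOCK_SIZE,
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 * registered with crypto_register_ahash(&mydrv_sha256_alg) from module init
 * and torn down with crypto_unregister_ahash(&mydrv_sha256_alg) on exit.
 */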

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");