// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the implementation of the ahash (asynchronous hash) API.  It differs
 * from shash (synchronous hash) in that ahash supports asynchronous operations,
 * and it hashes data from scatterlists instead of virtually addressed buffers.
 *
 * The ahash API provides access to both ahash and shash algorithms.  The shash
 * API only provides access to shash algorithms.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "hash.h"

#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
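/*
 * A minimal caller-side sketch of this API (illustrative only, not part of
 * this file; error handling is elided, and "sha256", "data" and "len" are
 * arbitrary examples):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	u8 digest[32];
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */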
/*
 * For an ahash tfm that is using an shash algorithm (i.e. a synchronous
 * algorithm), this returns the underlying shash tfm.
 */
static inline struct crypto_shash *ahash_to_shash(struct crypto_ahash *tfm)
{
	return *(struct crypto_shash **)crypto_ahash_ctx(tfm);
}

static inline struct shash_desc *prepare_shash_desc(struct ahash_request *req,
						    struct crypto_ahash *tfm)
{
	struct shash_desc *desc = ahash_request_ctx(req);

	desc->tfm = ahash_to_shash(tfm);
	return desc;
}

int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
{
	struct crypto_hash_walk walk;
	int nbytes;

	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, nbytes))
		nbytes = crypto_shash_update(desc, walk.data, nbytes);

	return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_update);

int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
{
	struct crypto_hash_walk walk;
	int nbytes;

	nbytes = crypto_hash_walk_first(req, &walk);
	if (!nbytes)
		return crypto_shash_final(desc, req->result);

	do {
		nbytes = crypto_hash_walk_last(&walk) ?
			 crypto_shash_finup(desc, walk.data, nbytes,
					    req->result) :
			 crypto_shash_update(desc, walk.data, nbytes);
		nbytes = crypto_hash_walk_done(&walk, nbytes);
	} while (nbytes > 0);

	return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_finup);

int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
	unsigned int nbytes = req->nbytes;
	struct scatterlist *sg;
	unsigned int offset;
	int err;

	if (nbytes &&
	    (sg = req->src, offset = sg->offset,
	     nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
		void *data;

		data = kmap_local_page(sg_page(sg));
		err = crypto_shash_digest(desc, data + offset, nbytes,
					  req->result);
		kunmap_local(data);
	} else
		err = crypto_shash_init(desc) ?:
		      shash_ahash_finup(req, desc);

	return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_digest);

static void crypto_exit_ahash_using_shash(struct crypto_tfm *tfm)
{
	struct crypto_shash **ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(*ctx);
}

static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
	struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *shash;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	shash = crypto_create_tfm(calg, &crypto_shash_type);
	if (IS_ERR(shash)) {
		crypto_mod_put(calg);
		return PTR_ERR(shash);
	}

	crt->using_shash = true;
	*ctx = shash;
	tfm->exit = crypto_exit_ahash_using_shash;

	crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
				    CRYPTO_TFM_NEED_KEY);
	crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);

	return 0;
}
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_local_page(walk->pg);
	walk->data += offset;
	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	walk->data -= walk->offset;

	kunmap_local(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
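/*
 * Usage sketch for the walk helpers above ("process" is a placeholder that
 * returns 0 on success or a negative errno, as crypto_shash_update() does in
 * shash_ahash_update()):
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, nbytes))
 *		nbytes = process(walk.data, nbytes);
 *
 * crypto_hash_walk_first() maps the first scatterlist chunk (at most one
 * page) and returns its length.  crypto_hash_walk_done() unmaps it and, given
 * 0, advances to the next chunk, returning 0 once the request is exhausted;
 * given a negative errno, it stops and propagates the error.
 */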
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm, struct ahash_alg *alg)
{
	if (alg->setkey != ahash_nosetkey &&
	    !(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	if (likely(tfm->using_shash)) {
		struct crypto_shash *shash = ahash_to_shash(tfm);
		int err;

		err = crypto_shash_setkey(shash, key, keylen);
		if (unlikely(err)) {
			crypto_ahash_set_flags(tfm,
					       crypto_shash_get_flags(shash) &
					       CRYPTO_TFM_NEED_KEY);
			return err;
		}
	} else {
		struct ahash_alg *alg = crypto_ahash_alg(tfm);
		int err;

		err = alg->setkey(tfm, key, keylen);
		if (unlikely(err)) {
			ahash_set_needkey(tfm, alg);
			return err;
		}
	}
	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

int crypto_ahash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_init(prepare_shash_desc(req, tfm));
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	return crypto_ahash_alg(tfm)->init(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_init);
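/*
 * A minimal keyed-hash sketch (e.g. for an "hmac(sha256)" tfm; error
 * handling elided):
 *
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	err = crypto_ahash_digest(req);
 *
 * Until a key is set, the CRYPTO_TFM_NEED_KEY flag managed above makes
 * crypto_ahash_init() and crypto_ahash_digest() fail with -ENOKEY.
 */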
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
			  bool has_state)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request *subreq;
	unsigned int subreq_size;
	unsigned int reqsize;
	u8 *result;
	gfp_t gfp;
	u32 flags;

	subreq_size = sizeof(*subreq);
	reqsize = crypto_ahash_reqsize(tfm);
	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
	subreq_size += reqsize;
	subreq_size += ds;

	flags = ahash_request_flags(req);
	gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
	subreq = kmalloc(subreq_size, gfp);
	if (!subreq)
		return -ENOMEM;

	ahash_request_set_tfm(subreq, tfm);
	ahash_request_set_callback(subreq, flags, cplt, req);

	result = (u8 *)(subreq + 1) + reqsize;

	ahash_request_set_crypt(subreq, req->src, result, req->nbytes);

	if (has_state) {
		void *state;

		state = kmalloc(crypto_ahash_statesize(tfm), gfp);
		if (!state) {
			kfree(subreq);
			return -ENOMEM;
		}

		crypto_ahash_export(req, state);
		crypto_ahash_import(subreq, state);
		kfree_sensitive(state);
	}

	req->priv = subreq;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request *subreq = req->priv;

	if (!err)
		memcpy(req->result, subreq->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	req->priv = NULL;

	kfree_sensitive(subreq);
}
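/*
 * Layout of the single allocation made by ahash_save_req() above:
 *
 *	+----------------------+ <- subreq
 *	| struct ahash_request |
 *	+----------------------+ <- subreq + 1
 *	| request context      |   reqsize bytes, aligned to
 *	|                      |   crypto_tfm_ctx_alignment()
 *	+----------------------+ <- result
 *	| digest buffer        |   ds bytes; copied back into req->result
 *	+----------------------+   by ahash_restore_req() on success
 */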
int crypto_ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return shash_ahash_update(req, ahash_request_ctx(req));

	return crypto_ahash_alg(tfm)->update(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_update);

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_final(ahash_request_ctx(req), req->result);

	return crypto_ahash_alg(tfm)->final(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return shash_ahash_finup(req, ahash_request_ctx(req));

	return crypto_ahash_alg(tfm)->finup(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return shash_ahash_digest(req, prepare_shash_desc(req, tfm));

	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return crypto_ahash_alg(tfm)->digest(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
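/*
 * Note that only crypto_ahash_init() and crypto_ahash_digest() check
 * CRYPTO_TFM_NEED_KEY: digest is a self-contained operation, while update,
 * final and finup are only valid on a request that has already passed init.
 */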
static void ahash_def_finup_done2(void *data, int err)
{
	struct ahash_request *areq = data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	ahash_request_complete(areq, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	struct ahash_request *subreq = req->priv;

	if (err)
		goto out;

	subreq->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_alg(crypto_ahash_reqtfm(req))->final(subreq);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(void *data, int err)
{
	struct ahash_request *areq = data;
	struct ahash_request *subreq;

	if (err == -EINPROGRESS)
		goto out;

	subreq = areq->priv;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = ahash_def_finup_finish1(areq, err);
	if (err == -EINPROGRESS || err == -EBUSY)
		return;

out:
	ahash_request_complete(areq, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1, true);
	if (err)
		return err;

	err = crypto_ahash_alg(tfm)->update(req->priv);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}
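/*
 * Flow of the default finup above, used for algorithms that provide only
 * ->update() and ->final():
 *
 *	ahash_def_finup()
 *	    ahash_save_req()          clone the request state into a subrequest
 *	    ->update(subreq)
 *	    ahash_def_finup_finish1()
 *	        ->final(subreq)
 *	        ahash_restore_req()   copy the digest back, free the subrequest
 *
 * If either step returns -EINPROGRESS or -EBUSY, the chain continues from
 * the completion callbacks (ahash_def_finup_done1/done2) instead.
 */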
int crypto_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_export(ahash_request_ctx(req), out);
	return crypto_ahash_alg(tfm)->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_ahash_export);

int crypto_ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_import(prepare_shash_desc(req, tfm), in);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	return crypto_ahash_alg(tfm)->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_ahash_import);
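/*
 * Export/import move a partial hash state between requests on the same
 * algorithm, e.g. (sketch; the buffer must hold crypto_ahash_statesize(tfm)
 * bytes, and error handling is elided):
 *
 *	void *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
 *
 *	crypto_ahash_export(req1, state);
 *	crypto_ahash_import(req2, state);
 *
 * ahash_save_req() above uses exactly this pattern to seed its subrequest.
 */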
static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	alg->exit_tfm(hash);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	crypto_ahash_set_statesize(hash, alg->halg.statesize);

	if (tfm->__crt_alg->cra_type == &crypto_shash_type)
		return crypto_init_ahash_using_shash(tfm);

	ahash_set_needkey(hash, alg);

	if (alg->exit_tfm)
		tfm->exit = crypto_ahash_exit_tfm;

	return alg->init_tfm ? alg->init_tfm(hash) : 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_shash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}

static int __maybe_unused crypto_ahash_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_ahash_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
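/*
 * crypto_ahash_show() above produces the ahash-specific portion of a
 * /proc/crypto entry, e.g. (illustrative values for a sha256 driver):
 *
 *	type         : ahash
 *	async        : yes
 *	blocksize    : 64
 *	digestsize   : 32
 */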
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type == &crypto_shash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey;
}

struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
{
	struct hash_alg_common *halg = crypto_hash_alg_common(hash);
	struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
	struct crypto_ahash *nhash;
	struct ahash_alg *alg;
	int err;

	if (!crypto_hash_alg_has_setkey(halg)) {
		tfm = crypto_tfm_get(tfm);
		if (IS_ERR(tfm))
			return ERR_CAST(tfm);

		return hash;
	}

	nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);

	if (IS_ERR(nhash))
		return nhash;

	nhash->reqsize = hash->reqsize;
	nhash->statesize = hash->statesize;

	if (likely(hash->using_shash)) {
		struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
		struct crypto_shash *shash;

		shash = crypto_clone_shash(ahash_to_shash(hash));
		if (IS_ERR(shash)) {
			err = PTR_ERR(shash);
			goto out_free_nhash;
		}
		nhash->using_shash = true;
		*nctx = shash;
		return nhash;
	}

	err = -ENOSYS;
	alg = crypto_ahash_alg(hash);
	if (!alg->clone_tfm)
		goto out_free_nhash;

	err = alg->clone_tfm(nhash, hash);
	if (err)
		goto out_free_nhash;

	return nhash;

out_free_nhash:
	crypto_free_ahash(nhash);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_clone_ahash);
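/*
 * Note on crypto_clone_ahash(): tfms whose algorithm has no setkey carry no
 * per-tfm state, so "cloning" them just takes a reference on the original;
 * keyed tfms get a real copy so that each clone owns its own key material.
 */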
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	if (alg->halg.statesize == 0)
		return -EINVAL;

	err = hash_prepare_alg(&alg->halg);
	if (err)
		return err;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	if (!alg->finup)
		alg->finup = ahash_def_finup;
	if (!alg->setkey)
		alg->setkey = ahash_nosetkey;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);
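/*
 * A driver-side sketch of registering an ahash algorithm (all "my_*" names
 * are hypothetical; a real driver also fills in cra_ctxsize, init_tfm,
 * exit_tfm, setkey, export/import, etc. as needed):
 *
 *	static struct ahash_alg my_sha256_alg = {
 *		.init	= my_sha256_init,
 *		.update	= my_sha256_update,
 *		.final	= my_sha256_final,
 *		.digest	= my_sha256_digest,
 *		.halg = {
 *			.digestsize = 32,
 *			.statesize  = sizeof(struct my_sha256_state),
 *			.base = {
 *				.cra_name	 = "sha256",
 *				.cra_driver_name = "sha256-mydev",
 *				.cra_priority	 = 300,
 *				.cra_flags	 = CRYPTO_ALG_ASYNC,
 *				.cra_blocksize	 = 64,
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&my_sha256_alg);
 *
 * ahash_prepare_alg() above then fills in the defaults (ahash_def_finup,
 * ahash_nosetkey) for any hooks the driver left NULL.
 */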
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");