--- crypto_engine.c	(older API: per-tfm struct crypto_engine_ctx hooks)
+++ crypto_engine.c	(newer API: per-algorithm struct crypto_engine_alg hooks)

 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Handle async block request by crypto hardware engine.
  *
  * Copyright (C) 2016 Linaro, Inc.
  *
  * Author: Baolin Wang <baolin.wang@linaro.org>
  */

+#include <crypto/internal/aead.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/device.h>
-#include <crypto/engine.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
 #include <uapi/linux/sched/types.h>
 #include "internal.h"

 #define CRYPTO_ENGINE_MAX_QLEN 10

+/* Temporary algorithm flag used to indicate an engine algorithm. */
+#define CRYPTO_ALG_ENGINE 0x200
+
+struct crypto_engine_alg {
+        struct crypto_alg base;
+        struct crypto_engine_op op;
+};
+
 /**
  * crypto_finalize_request - finalize one request if the request is done
  * @engine: the hardware engine
  * @req: the request need to be finalized
  * @err: error number
  */
 static void crypto_finalize_request(struct crypto_engine *engine,
                                     struct crypto_async_request *req, int err)
 {
         unsigned long flags;
-        bool finalize_req = false;
-        int ret;
-        struct crypto_engine_ctx *enginectx;

         /*
          * If hardware cannot enqueue more requests
          * and retry mechanism is not supported
          * make sure we are completing the current request
          */
         if (!engine->retry_support) {
                 spin_lock_irqsave(&engine->queue_lock, flags);
                 if (engine->cur_req == req) {
-                        finalize_req = true;
                         engine->cur_req = NULL;
                 }
                 spin_unlock_irqrestore(&engine->queue_lock, flags);
         }

-        if (finalize_req || engine->retry_support) {
-                enginectx = crypto_tfm_ctx(req->tfm);
-                if (enginectx->op.prepare_request &&
-                    enginectx->op.unprepare_request) {
-                        ret = enginectx->op.unprepare_request(engine, req);
-                        if (ret)
-                                dev_err(engine->dev, "failed to unprepare request\n");
-                }
-        }
         lockdep_assert_in_softirq();
-        req->complete(req, err);
+        crypto_request_complete(req, err);

         kthread_queue_work(engine->kworker, &engine->pump_requests);
 }

 /**
  * crypto_pump_requests - dequeue one request from engine queue to process
  * @engine: the hardware engine
  * @in_kthread: true if we are in the context of the request pump thread
  *
  * This function checks if there is any request in the engine queue that
  * needs processing and if so call out to the driver to initialize hardware
  * and handle each request.
  */
 static void crypto_pump_requests(struct crypto_engine *engine,
                                  bool in_kthread)
 {
         struct crypto_async_request *async_req, *backlog;
+        struct crypto_engine_alg *alg;
+        struct crypto_engine_op *op;
         unsigned long flags;
         bool was_busy = false;
         int ret;
-        struct crypto_engine_ctx *enginectx;

         spin_lock_irqsave(&engine->queue_lock, flags);

         /* Make sure we are not already running a request */
         if (!engine->retry_support && engine->cur_req)
                 goto out;

         /* If another context is idling then defer */
         if (engine->idling) {
                 kthread_queue_work(engine->kworker, &engine->pump_requests);
                 goto out;
         }

         /* Check if the engine queue is idle */
         if (!crypto_queue_len(&engine->queue) || !engine->running) {
                 if (!engine->busy)
                         goto out;

                 /* Only do teardown in the thread */
                 if (!in_kthread) {
                         kthread_queue_work(engine->kworker,
                                            &engine->pump_requests);
                         goto out;
                 }

                 engine->busy = false;
                 engine->idling = true;
                 spin_unlock_irqrestore(&engine->queue_lock, flags);

                 if (engine->unprepare_crypt_hardware &&
                     engine->unprepare_crypt_hardware(engine))
                         dev_err(engine->dev, "failed to unprepare crypt hardware\n");

                 spin_lock_irqsave(&engine->queue_lock, flags);
                 engine->idling = false;
                 goto out;
         }

 start_request:
         /* Get the fist request from the engine queue to handle */
         backlog = crypto_get_backlog(&engine->queue);
         async_req = crypto_dequeue_request(&engine->queue);
         if (!async_req)
                 goto out;

         /*
          * If hardware doesn't support the retry mechanism,
          * keep track of the request we are processing now.
          * We'll need it on completion (crypto_finalize_request).
          */
         if (!engine->retry_support)
                 engine->cur_req = async_req;

-        if (backlog)
-                backlog->complete(backlog, -EINPROGRESS);
-
         if (engine->busy)
                 was_busy = true;
         else
                 engine->busy = true;

         spin_unlock_irqrestore(&engine->queue_lock, flags);

         /* Until here we get the request need to be encrypted successfully */
         if (!was_busy && engine->prepare_crypt_hardware) {
                 ret = engine->prepare_crypt_hardware(engine);
                 if (ret) {
                         dev_err(engine->dev, "failed to prepare crypt hardware\n");
-                        goto req_err_2;
+                        goto req_err_1;
                 }
         }

-        enginectx = crypto_tfm_ctx(async_req->tfm);
-
-        if (enginectx->op.prepare_request) {
-                ret = enginectx->op.prepare_request(engine, async_req);
-                if (ret) {
-                        dev_err(engine->dev, "failed to prepare request: %d\n",
-                                ret);
-                        goto req_err_2;
-                }
-        }
-        if (!enginectx->op.do_one_request) {
+        if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) {
+                alg = container_of(async_req->tfm->__crt_alg,
+                                   struct crypto_engine_alg, base);
+                op = &alg->op;
+        } else {
                 dev_err(engine->dev, "failed to do request\n");
                 ret = -EINVAL;
                 goto req_err_1;
         }

-        ret = enginectx->op.do_one_request(engine, async_req);
+        ret = op->do_one_request(engine, async_req);

         /* Request unsuccessfully executed by hardware */
         if (ret < 0) {
                 /*
                  * If hardware queue is full (-ENOSPC), requeue request
                  * regardless of backlog flag.
                  * Otherwise, unprepare and complete the request.
                  */
                 if (!engine->retry_support ||
                     (ret != -ENOSPC)) {
                         dev_err(engine->dev,
                                 "Failed to do one request from queue: %d\n",
                                 ret);
                         goto req_err_1;
                 }
-                /*
-                 * If retry mechanism is supported,
-                 * unprepare current request and
-                 * enqueue it back into crypto-engine queue.
-                 */
-                if (enginectx->op.unprepare_request) {
-                        ret = enginectx->op.unprepare_request(engine,
-                                                              async_req);
-                        if (ret)
-                                dev_err(engine->dev,
-                                        "failed to unprepare request\n");
-                }
                 spin_lock_irqsave(&engine->queue_lock, flags);
                 /*
                  * If hardware was unable to execute request, enqueue it
                  * back in front of crypto-engine queue, to keep the order
                  * of requests.
                  */
                 crypto_enqueue_request_head(&engine->queue, async_req);

                 kthread_queue_work(engine->kworker, &engine->pump_requests);
                 goto out;
         }

         goto retry;

 req_err_1:
-        if (enginectx->op.unprepare_request) {
-                ret = enginectx->op.unprepare_request(engine, async_req);
-                if (ret)
-                        dev_err(engine->dev, "failed to unprepare request\n");
-        }
+        crypto_request_complete(async_req, ret);

-req_err_2:
-        async_req->complete(async_req, ret);
-
 retry:
+        if (backlog)
+                crypto_request_complete(backlog, -EINPROGRESS);
+
         /* If retry mechanism is supported, send new requests to engine */
         if (engine->retry_support) {
                 spin_lock_irqsave(&engine->queue_lock, flags);
                 goto start_request;
         }
         return;

 out:
         spin_unlock_irqrestore(&engine->queue_lock, flags);

         /*
          * Batch requests is possible only if
          * hardware can enqueue multiple requests
          */
         if (engine->do_batch_requests) {
                 ret = engine->do_batch_requests(engine);
                 if (ret)
                         dev_err(engine->dev, "failed to do batch requests: %d\n",
                                 ret);
         }

         return;
 }

 static void crypto_pump_work(struct kthread_work *work)
 {
         struct crypto_engine *engine =
                 container_of(work, struct crypto_engine, pump_requests);

         crypto_pump_requests(engine, true);
 }
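
The pump above hands each dequeued request to the algorithm's do_one_request() hook.
A minimal sketch of such a hook under the newer API follows; struct my_device,
struct my_ctx and my_hw_submit() are hypothetical driver details, not part of this file:

/* Runs in the engine's kworker thread, so it may sleep. */
static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
        struct skcipher_request *req =
                container_of(areq, struct skcipher_request, base);
        struct my_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        struct my_device *mdev = ctx->mdev;     /* stashed at tfm init time */
        int ret;

        mdev->cur_req = req;    /* remembered for the completion path */

        /* Program key/IV/DMA and kick the hardware (hypothetical helper). */
        ret = my_hw_submit(mdev, req);
        if (ret)
                return ret;     /* < 0: the pump completes it, or requeues on -ENOSPC */

        /* 0 means accepted; completion comes later via crypto_finalize_*(). */
        return 0;
}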

 /**
  * crypto_transfer_request - transfer the new request into the engine queue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
  * @need_pump: indicates whether queue the pump of request to kthread_work
  */
 static int crypto_transfer_request(struct crypto_engine *engine,
                                    struct crypto_async_request *req,
                                    bool need_pump)
 {
         unsigned long flags;
         int ret;

         spin_lock_irqsave(&engine->queue_lock, flags);

         if (!engine->running) {
                 spin_unlock_irqrestore(&engine->queue_lock, flags);
                 return -ESHUTDOWN;
         }

         ret = crypto_enqueue_request(&engine->queue, req);

         if (!engine->busy && need_pump)
                 kthread_queue_work(engine->kworker, &engine->pump_requests);

         spin_unlock_irqrestore(&engine->queue_lock, flags);
         return ret;
 }

 /**
  * crypto_transfer_request_to_engine - transfer one request to list
  * into the engine queue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
  */
 static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
                                              struct crypto_async_request *req)
 {
         return crypto_transfer_request(engine, req, true);
 }

 /**
  * crypto_transfer_aead_request_to_engine - transfer one aead_request
  * to list into the engine queue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
  */
 int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
                                            struct aead_request *req)
 {
         return crypto_transfer_request_to_engine(engine, &req->base);
 }
 EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

 /**
  * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
  * to list into the engine queue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
  */
 int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
                                                struct akcipher_request *req)
 {
         return crypto_transfer_request_to_engine(engine, &req->base);
 }
 EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

 /**
  * crypto_transfer_hash_request_to_engine - transfer one ahash_request
  * to list into the engine queue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
  */
 int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
                                            struct ahash_request *req)
 {
         return crypto_transfer_request_to_engine(engine, &req->base);
 }
 EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
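
A driver's skcipher entry points usually do nothing but queue the request on the
engine via these helpers; a hedged sketch (the engine pointer living in the tfm
context is an assumption of this example):

static int my_skcipher_encrypt(struct skcipher_request *req)
{
        struct my_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));

        /*
         * Typically returns -EINPROGRESS (or -EBUSY for a backlogged
         * request); do_one_request() runs later from the engine kworker.
         */
        return crypto_transfer_skcipher_request_to_engine(ctx->engine, req);
}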

 /**
  * crypto_transfer_kpp_request_to_engine - transfer one kpp_request to list
  * into the engine queue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
  */
 int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
                                           struct kpp_request *req)
 {
         return crypto_transfer_request_to_engine(engine, &req->base);
 }
 EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);

 /**
  * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
  * to list into the engine queue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
  */
 int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
                                                struct skcipher_request *req)
 {
         return crypto_transfer_request_to_engine(engine, &req->base);
 }
 EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);

 /**
  * crypto_finalize_aead_request - finalize one aead_request if
  * the request is done
  * @engine: the hardware engine
  * @req: the request need to be finalized
  * @err: error number
  */
 void crypto_finalize_aead_request(struct crypto_engine *engine,
                                   struct aead_request *req, int err)
 {
         return crypto_finalize_request(engine, &req->base, err);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

 /**
  * crypto_finalize_akcipher_request - finalize one akcipher_request if
  * the request is done
  * @engine: the hardware engine
  * @req: the request need to be finalized
  * @err: error number
  */
 void crypto_finalize_akcipher_request(struct crypto_engine *engine,
                                       struct akcipher_request *req, int err)
 {
         return crypto_finalize_request(engine, &req->base, err);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

 /**
  * crypto_finalize_hash_request - finalize one ahash_request if
  * the request is done
  * @engine: the hardware engine
  * @req: the request need to be finalized
  * @err: error number
  */
 void crypto_finalize_hash_request(struct crypto_engine *engine,
                                   struct ahash_request *req, int err)
 {
         return crypto_finalize_request(engine, &req->base, err);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

 /**
  * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
  * @engine: the hardware engine
  * @req: the request need to be finalized
  * @err: error number
  */
 void crypto_finalize_kpp_request(struct crypto_engine *engine,
                                  struct kpp_request *req, int err)
 {
         return crypto_finalize_request(engine, &req->base, err);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);

 /**
  * crypto_finalize_skcipher_request - finalize one skcipher_request if
  * the request is done
  * @engine: the hardware engine
  * @req: the request need to be finalized
  * @err: error number
  */
 void crypto_finalize_skcipher_request(struct crypto_engine *engine,
                                       struct skcipher_request *req, int err)
 {
         return crypto_finalize_request(engine, &req->base, err);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
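
When the hardware signals completion, the driver reports the result through one of
these crypto_finalize_*_request() helpers. Since crypto_finalize_request() asserts
softirq context (the lockdep_assert_in_softirq() above), a tasklet is a natural
place to call it from; done_task, hw_error and cur_req are hypothetical fields:

/* Scheduled from the device's hard IRQ handler via tasklet_schedule(). */
static void my_done_tasklet(struct tasklet_struct *t)
{
        struct my_device *mdev = from_tasklet(mdev, t, done_task);
        int err = mdev->hw_error ? -EIO : 0;

        /* Completes the request and kicks the pump for the next one. */
        crypto_finalize_skcipher_request(mdev->engine, mdev->cur_req, err);
}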

 /**
  * crypto_engine_start - start the hardware engine
  * @engine: the hardware engine need to be started
  *
  * Return 0 on success, else on fail.
  */
 int crypto_engine_start(struct crypto_engine *engine)
 {
         unsigned long flags;

         spin_lock_irqsave(&engine->queue_lock, flags);

         if (engine->running || engine->busy) {
                 spin_unlock_irqrestore(&engine->queue_lock, flags);
                 return -EBUSY;
         }

         engine->running = true;
         spin_unlock_irqrestore(&engine->queue_lock, flags);

         kthread_queue_work(engine->kworker, &engine->pump_requests);

         return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_engine_start);

 /**
  * crypto_engine_stop - stop the hardware engine
  * @engine: the hardware engine need to be stopped
  *
  * Return 0 on success, else on fail.
  */
 int crypto_engine_stop(struct crypto_engine *engine)
 {
         unsigned long flags;
         unsigned int limit = 500;
         int ret = 0;

         spin_lock_irqsave(&engine->queue_lock, flags);

         /*
          * If the engine queue is not empty or the engine is on busy state,
          * we need to wait for a while to pump the requests of engine queue.
          */
         while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
                 spin_unlock_irqrestore(&engine->queue_lock, flags);
                 msleep(20);
                 spin_lock_irqsave(&engine->queue_lock, flags);
         }

         if (crypto_queue_len(&engine->queue) || engine->busy)
                 ret = -EBUSY;
         else
                 engine->running = false;

         spin_unlock_irqrestore(&engine->queue_lock, flags);

         if (ret)
                 dev_warn(engine->dev, "could not stop engine\n");

         return ret;
 }
 EXPORT_SYMBOL_GPL(crypto_engine_stop);
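
A typical driver brackets the engine's lifetime with these calls; a minimal sketch
of the probe/remove flow, with hypothetical names and error handling trimmed:

static int my_probe(struct platform_device *pdev)
{
        struct my_device *mdev;
        int ret;

        mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
        if (!mdev)
                return -ENOMEM;

        mdev->engine = crypto_engine_alloc_init(&pdev->dev, true);
        if (!mdev->engine)
                return -ENOMEM;

        ret = crypto_engine_start(mdev->engine);
        if (ret) {
                crypto_engine_exit(mdev->engine);
                return ret;
        }

        platform_set_drvdata(pdev, mdev);
        return 0;
}

static int my_remove(struct platform_device *pdev)
{
        struct my_device *mdev = platform_get_drvdata(pdev);

        /* Drains the queue (crypto_engine_stop()) and frees the kworker. */
        crypto_engine_exit(mdev->engine);
        return 0;
}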

 /**
  * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
  * and initialize it by setting the maximum number of entries in the software
  * crypto-engine queue.
  * @dev: the device attached with one hardware engine
  * @retry_support: whether hardware has support for retry mechanism
  * @cbk_do_batch: pointer to a callback function to be invoked when executing
  *                a batch of requests.
  *                This has the form:
  *                callback(struct crypto_engine *engine)
  *                where:
- *                @engine: the crypto engine structure.
+ *                engine: the crypto engine structure.
  * @rt: whether this queue is set to run as a realtime task
  * @qlen: maximum size of the crypto-engine queue
  *
  * This must be called from context that can sleep.
  * Return: the crypto engine structure on success, else NULL.
  */
 struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
                                                        bool retry_support,
                                                        int (*cbk_do_batch)(struct crypto_engine *engine),
                                                        bool rt, int qlen)
 {
         struct crypto_engine *engine;

         if (!dev)
                 return NULL;

         engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
         if (!engine)
                 return NULL;

         engine->dev = dev;
         engine->rt = rt;
         engine->running = false;
         engine->busy = false;
         engine->idling = false;
         engine->retry_support = retry_support;
         engine->priv_data = dev;
         /*
          * Batch requests is possible only if
          * hardware has support for retry mechanism.
          */
         engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

         snprintf(engine->name, sizeof(engine->name),
                  "%s-engine", dev_name(dev));

         crypto_init_queue(&engine->queue, qlen);
         spin_lock_init(&engine->queue_lock);

         engine->kworker = kthread_create_worker(0, "%s", engine->name);
         if (IS_ERR(engine->kworker)) {
                 dev_err(dev, "failed to create crypto request pump task\n");
                 return NULL;
         }
         kthread_init_work(&engine->pump_requests, crypto_pump_work);

         if (engine->rt) {
                 dev_info(dev, "will run requests pump with realtime priority\n");
                 sched_set_fifo(engine->kworker->task);
         }

         return engine;
 }
 EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
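
When retry_support is true, the do_batch_requests callback gives the driver a single
hook after the pump has pushed a burst of requests, for instance to ring one doorbell
for the whole batch. A sketch; my_hw_flush() is invented, and recovering the driver
state through engine->dev is just one possible arrangement:

static int my_do_batch(struct crypto_engine *engine)
{
        struct my_device *mdev = dev_get_drvdata(engine->dev);

        /* Flush everything queued by the preceding do_one_request() calls. */
        return my_hw_flush(mdev);
}

/* In probe: hardware that accepts several in-flight requests and returns
 * -ENOSPC when full advertises retry support plus the batch callback. */
mdev->engine = crypto_engine_alloc_init_and_set(&pdev->dev, true, my_do_batch,
                                                false, 16 /* qlen */);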

 /**
  * crypto_engine_alloc_init - allocate crypto hardware engine structure and
  * initialize it.
  * @dev: the device attached with one hardware engine
  * @rt: whether this queue is set to run as a realtime task
  *
  * This must be called from context that can sleep.
  * Return: the crypto engine structure on success, else NULL.
  */
 struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 {
         return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
                                                 CRYPTO_ENGINE_MAX_QLEN);
 }
 EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

 /**
  * crypto_engine_exit - free the resources of hardware engine when exit
  * @engine: the hardware engine need to be freed
- *
- * Return 0 for success.
- */
-int crypto_engine_exit(struct crypto_engine *engine)
+ */
+void crypto_engine_exit(struct crypto_engine *engine)
 {
         int ret;

         ret = crypto_engine_stop(engine);
         if (ret)
-                return ret;
+                return;

         kthread_destroy_worker(engine->kworker);
-
-        return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_engine_exit);
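
Under the newer API the do_one_request() hook travels with the algorithm rather than
the tfm context: the driver wraps its skcipher_alg in a skcipher_engine_alg and
registers it with crypto_engine_register_skcipher() (defined below), which sets
CRYPTO_ALG_ENGINE internally. A hedged sketch; all "my_*" names are invented:

static struct skcipher_engine_alg my_cbc_aes = {
        .base = {
                .base = {
                        .cra_name        = "cbc(aes)",
                        .cra_driver_name = "my-cbc-aes",
                        .cra_priority    = 300,
                        .cra_flags       = CRYPTO_ALG_ASYNC,
                        .cra_blocksize   = AES_BLOCK_SIZE,
                        .cra_ctxsize     = sizeof(struct my_ctx),
                        .cra_module      = THIS_MODULE,
                },
                .min_keysize    = AES_MIN_KEY_SIZE,     /* <crypto/aes.h> */
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = my_setkey,
                .encrypt        = my_skcipher_encrypt,
                .decrypt        = my_skcipher_decrypt,
        },
        .op = {
                .do_one_request = my_do_one_request,
        },
};

/* In probe, after crypto_engine_start(): */
ret = crypto_engine_register_skcipher(&my_cbc_aes);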

+int crypto_engine_register_aead(struct aead_engine_alg *alg)
+{
+        if (!alg->op.do_one_request)
+                return -EINVAL;
+
+        alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+        return crypto_register_aead(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_aead);
+
+void crypto_engine_unregister_aead(struct aead_engine_alg *alg)
+{
+        crypto_unregister_aead(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead);
+
+int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count)
+{
+        int i, ret;
+
+        for (i = 0; i < count; i++) {
+                ret = crypto_engine_register_aead(&algs[i]);
+                if (ret)
+                        goto err;
+        }
+
+        return 0;
+
+err:
+        crypto_engine_unregister_aeads(algs, i);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_aeads);
+
+void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count)
+{
+        int i;
+
+        for (i = count - 1; i >= 0; --i)
+                crypto_engine_unregister_aead(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads);
+
+int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
+{
+        if (!alg->op.do_one_request)
+                return -EINVAL;
+
+        alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+        return crypto_register_ahash(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);
+
+void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg)
+{
+        crypto_unregister_ahash(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash);
+
+int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count)
+{
+        int i, ret;
+
+        for (i = 0; i < count; i++) {
+                ret = crypto_engine_register_ahash(&algs[i]);
+                if (ret)
+                        goto err;
+        }
+
+        return 0;
+
+err:
+        crypto_engine_unregister_ahashes(algs, i);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes);
+
+void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
+                                      int count)
+{
+        int i;
+
+        for (i = count - 1; i >= 0; --i)
+                crypto_engine_unregister_ahash(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes);
+
+int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
+{
+        if (!alg->op.do_one_request)
+                return -EINVAL;
+
+        alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+        return crypto_register_akcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);
+
+void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg)
+{
+        crypto_unregister_akcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher);
+
+int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
+{
+        if (!alg->op.do_one_request)
+                return -EINVAL;
+
+        alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+        return crypto_register_kpp(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);
+
+void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg)
+{
+        crypto_unregister_kpp(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp);

+int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
+{
+        if (!alg->op.do_one_request)
+                return -EINVAL;
+
+        alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+        return crypto_register_skcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);
+
+void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg)
+{
+        return crypto_unregister_skcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher);
+
+int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
+                                     int count)
+{
+        int i, ret;
+
+        for (i = 0; i < count; i++) {
+                ret = crypto_engine_register_skcipher(&algs[i]);
+                if (ret)
+                        goto err;
+        }
+
+        return 0;
+
+err:
+        crypto_engine_unregister_skciphers(algs, i);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers);
+
+void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
+                                        int count)
+{
+        int i;
+
+        for (i = count - 1; i >= 0; --i)
+                crypto_engine_unregister_skcipher(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers);

 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Crypto hardware engine framework");
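
For contrast, the "-" side above found its hooks through the transform context: the
older API required a struct crypto_engine_ctx as the first member of the tfm context,
filled in at init time, with prepare_request/unprepare_request as optional extra
hooks (both dropped in the newer API). Roughly, with hypothetical names:

struct my_ctx {
        struct crypto_engine_ctx enginectx;     /* had to come first */
        struct my_device *mdev;
        /* key material, fallback tfm, ... */
};

static int my_init_tfm(struct crypto_skcipher *tfm)
{
        struct my_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->enginectx.op.do_one_request = my_do_one_request;
        ctx->enginectx.op.prepare_request = NULL;       /* optional */
        ctx->enginectx.op.unprepare_request = NULL;     /* optional */
        return 0;
}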