// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/engine.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/* Temporary algorithm flag used to indicate an engine algorithm. */
#define CRYPTO_ALG_ENGINE 0x200

struct crypto_engine_alg {
	struct crypto_alg base;
	struct crypto_engine_op op;
};

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;

	/*
	 * If hardware cannot enqueue more requests
	 * and the retry mechanism is not supported,
	 * make sure we are completing the current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req)
			engine->cur_req = NULL;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	lockdep_assert_in_softirq();
	crypto_request_complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}
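
/*
 * Illustrative sketch (not part of the original file): a driver's completion
 * path, typically a tasklet/softirq run after a "done" interrupt, hands the
 * finished request back through one of the crypto_finalize_*_request()
 * wrappers below, which end up here. "my_priv" and "my_done_tasklet" are
 * hypothetical driver names assumed only for the example:
 *
 *	static void my_done_tasklet(unsigned long data)
 *	{
 *		struct my_priv *priv = (struct my_priv *)data;
 *
 *		crypto_finalize_skcipher_request(priv->engine, priv->req,
 *						 priv->err);
 *	}
 */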
/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct crypto_engine_alg *alg;
	struct crypto_engine_op *op;
	unsigned long flags;
	bool was_busy = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point the request has been dequeued successfully */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_1;
		}
	}

	if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) {
		alg = container_of(async_req->tfm->__crt_alg,
				   struct crypto_engine_alg, base);
		op = &alg->op;
	} else {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = op->do_one_request(engine, async_req);

	/* Request unsuccessfully executed by hardware */
	if (ret < 0) {
		/*
		 * If hardware queue is full (-ENOSPC), requeue request
		 * regardless of backlog flag.
		 * Otherwise, unprepare and complete the request.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If hardware was unable to execute request, enqueue it
		 * back in front of crypto-engine queue, to keep the order
		 * of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	crypto_request_complete(async_req, ret);

retry:
	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	/* If retry mechanism is supported, send new requests to engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if
	 * the hardware can enqueue multiple requests.
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}
}
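
/*
 * Illustrative sketch (an assumption, not from this file): a
 * do_one_request() callback for retry-capable hardware reports a full
 * hardware queue with -ENOSPC, which makes crypto_pump_requests() above put
 * the request back at the head of the software queue instead of failing it.
 * The "my_hw_*" helpers are hypothetical:
 *
 *	static int my_do_one_request(struct crypto_engine *engine, void *areq)
 *	{
 *		struct skcipher_request *req =
 *			container_of(areq, struct skcipher_request, base);
 *
 *		if (my_hw_queue_full(engine))
 *			return -ENOSPC;	// requeued by the core, retried later
 *
 *		return my_hw_submit(engine, req); // 0 on successful submission
 *	}
 */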
static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: indicates whether to queue the request pump to kthread_work
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}
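
/*
 * Editor's note (hedged, not from this file): crypto_enqueue_request()
 * returns -EINPROGRESS when the request was queued, or -EBUSY when it was
 * placed on the backlog (only permitted if the submitter set
 * CRYPTO_TFM_REQ_MAY_BACKLOG). Either value propagates to callers of the
 * crypto_transfer_*_request_to_engine() wrappers below, e.g.:
 *
 *	ret = crypto_transfer_skcipher_request_to_engine(engine, req);
 *	// ret == -EINPROGRESS: queued; ret == -EBUSY: backlogged
 */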
/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
/**
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
					  struct kpp_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
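
/*
 * Illustrative sketch (assumed driver code, not part of this file): an
 * skcipher .encrypt entry point usually just hands the request off to the
 * engine and lets the pump thread drive the hardware. "my_priv" is a
 * hypothetical per-tfm context:
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct my_priv *priv = crypto_skcipher_ctx(tfm);
 *
 *		return crypto_transfer_skcipher_request_to_engine(priv->engine,
 *								  req);
 *	}
 */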
/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_kpp_request(struct crypto_engine *engine,
				 struct kpp_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);
/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, we need to
	 * wait for a while so the queued requests can be pumped.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);
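
/*
 * Illustrative sketch (an assumption): a driver's PM hooks can bracket
 * hardware suspend/resume with crypto_engine_stop()/crypto_engine_start().
 * Note that crypto_engine_stop() above polls the queue for up to ~10 s
 * (500 iterations of msleep(20)) before giving up with -EBUSY. "my_suspend"
 * and "my_priv" are hypothetical:
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct my_priv *priv = dev_get_drvdata(dev);
 *
 *		return crypto_engine_stop(priv->engine);
 *	}
 */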
/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached to one hardware engine
 * @retry_support: whether hardware has support for retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	/*
	 * Batching requests is possible only if
	 * the hardware supports the retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
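
/*
 * Illustrative sketch (assumed values, not from this file): a driver whose
 * hardware can queue several requests and reports a full queue with -ENOSPC
 * would pass retry_support = true and a batch callback; as seen above, the
 * callback is only retained when retry_support is set. "my_dev",
 * "my_do_batch" and the queue length of 128 are hypothetical:
 *
 *	engine = crypto_engine_alloc_init_and_set(my_dev, true, my_do_batch,
 *						  false, 128);
 *	if (!engine)
 *		return -ENOMEM;
 */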
/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to one hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of hardware engine when exit
 * @engine: the hardware engine that needs to be freed
 */
void crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return;

	kthread_destroy_worker(engine->kworker);
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
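
/*
 * Illustrative probe/remove sketch (the "my_priv" driver context is a
 * hypothetical assumption): allocate and start the engine in probe, and tear
 * it down with crypto_engine_exit() in remove:
 *
 *	// in probe():
 *	priv->engine = crypto_engine_alloc_init(dev, true);
 *	if (!priv->engine)
 *		return -ENOMEM;
 *
 *	ret = crypto_engine_start(priv->engine);
 *	if (ret)
 *		crypto_engine_exit(priv->engine);
 *
 *	// in remove():
 *	crypto_engine_exit(priv->engine);
 */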
int crypto_engine_register_aead(struct aead_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aead);

void crypto_engine_unregister_aead(struct aead_engine_alg *alg)
{
	crypto_unregister_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead);

int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_engine_register_aead(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	crypto_engine_unregister_aeads(algs, i);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aeads);

void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_engine_unregister_aead(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads);

int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);

void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg)
{
	crypto_unregister_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash);

int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_engine_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	crypto_engine_unregister_ahashes(algs, i);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes);

void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
				      int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_engine_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes);

int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);

void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg)
{
	crypto_unregister_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher);

int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);

void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg)
{
	crypto_unregister_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp);

int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);
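
/*
 * Illustrative sketch (assumed driver code): an engine algorithm embeds the
 * base algorithm and supplies do_one_request in its op member; registration
 * then goes through crypto_engine_register_skcipher() above, which tags the
 * algorithm with CRYPTO_ALG_ENGINE. The "my_" names are hypothetical:
 *
 *	static struct skcipher_engine_alg my_alg = {
 *		.base = {
 *			... usual skcipher_alg fields ...
 *		},
 *		.op = {
 *			.do_one_request = my_do_one_request,
 *		},
 *	};
 *
 *	err = crypto_engine_register_skcipher(&my_alg);
 */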
void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg)
{
	crypto_unregister_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher);

int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
				     int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_engine_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	crypto_engine_unregister_skciphers(algs, i);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers);

void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
					int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_engine_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");