// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/engine.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/* Temporary algorithm flag used to indicate an engine algorithm. */
#define CRYPTO_ALG_ENGINE 0x200

struct crypto_engine_alg {
        struct crypto_alg base;
        struct crypto_engine_op op;
};

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
                                    struct crypto_async_request *req, int err)
{
        unsigned long flags;

        /*
         * If hardware cannot enqueue more requests
         * and the retry mechanism is not supported,
         * make sure we are completing the current request.
         */
        if (!engine->retry_support) {
                spin_lock_irqsave(&engine->queue_lock, flags);
                if (engine->cur_req == req) {
                        engine->cur_req = NULL;
                }
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        lockdep_assert_in_softirq();
        crypto_request_complete(req, err);

        kthread_queue_work(engine->kworker, &engine->pump_requests);
}
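
/*
 * Illustrative sketch (not part of the original file): a driver is expected
 * to call one of the crypto_finalize_*_request() wrappers below from its
 * completion path, e.g. a tasklet scheduled by its interrupt handler, which
 * satisfies the softirq-context assertion above. All mydrv_* names are
 * hypothetical.
 *
 *      static void mydrv_done_tasklet(unsigned long data)
 *      {
 *              struct mydrv_dev *dd = (struct mydrv_dev *)data;
 *              int err = mydrv_read_status(dd) ? -EIO : 0;
 *
 *              // Completes the request and kicks the engine queue pump.
 *              crypto_finalize_skcipher_request(dd->engine, dd->req, err);
 *      }
 */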

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
{
        struct crypto_async_request *async_req, *backlog;
        struct crypto_engine_alg *alg;
        struct crypto_engine_op *op;
        unsigned long flags;
        bool was_busy = false;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /* Make sure we are not already running a request */
        if (!engine->retry_support && engine->cur_req)
                goto out;

        /* If another context is idling then defer */
        if (engine->idling) {
                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        /* Check if the engine queue is idle */
        if (!crypto_queue_len(&engine->queue) || !engine->running) {
                if (!engine->busy)
                        goto out;

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        kthread_queue_work(engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }

                engine->busy = false;
                engine->idling = true;
                spin_unlock_irqrestore(&engine->queue_lock, flags);

                if (engine->unprepare_crypt_hardware &&
                    engine->unprepare_crypt_hardware(engine))
                        dev_err(engine->dev, "failed to unprepare crypt hardware\n");

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->idling = false;
                goto out;
        }

start_request:
        /* Get the first request from the engine queue to handle */
        backlog = crypto_get_backlog(&engine->queue);
        async_req = crypto_dequeue_request(&engine->queue);
        if (!async_req)
                goto out;

        /*
         * If hardware doesn't support the retry mechanism,
         * keep track of the request we are processing now.
         * We'll need it on completion (crypto_finalize_request).
         */
        if (!engine->retry_support)
                engine->cur_req = async_req;

        if (engine->busy)
                was_busy = true;
        else
                engine->busy = true;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /* By this point we have successfully dequeued a request to process */
        if (!was_busy && engine->prepare_crypt_hardware) {
                ret = engine->prepare_crypt_hardware(engine);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare crypt hardware\n");
                        goto req_err_1;
                }
        }

        if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) {
                alg = container_of(async_req->tfm->__crt_alg,
                                   struct crypto_engine_alg, base);
                op = &alg->op;
        } else {
                dev_err(engine->dev, "failed to do request\n");
                ret = -EINVAL;
                goto req_err_1;
        }

        ret = op->do_one_request(engine, async_req);

        /* Request unsuccessfully executed by hardware */
        if (ret < 0) {
                /*
                 * If the hardware queue is full (-ENOSPC), requeue the
                 * request, regardless of the backlog flag.
                 * Otherwise, complete the request.
                 */
                if (!engine->retry_support ||
                    (ret != -ENOSPC)) {
                        dev_err(engine->dev,
                                "Failed to do one request from queue: %d\n",
                                ret);
                        goto req_err_1;
                }
                spin_lock_irqsave(&engine->queue_lock, flags);
                /*
                 * If hardware was unable to execute the request, enqueue it
                 * back in front of the crypto-engine queue, to keep the order
                 * of requests.
                 */
                crypto_enqueue_request_head(&engine->queue, async_req);

                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        goto retry;

req_err_1:
        crypto_request_complete(async_req, ret);

retry:
        if (backlog)
                crypto_request_complete(backlog, -EINPROGRESS);

        /* If retry mechanism is supported, send new requests to engine */
        if (engine->retry_support) {
                spin_lock_irqsave(&engine->queue_lock, flags);
                goto start_request;
        }
        return;

out:
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /*
         * Batching requests is possible only if
         * the hardware can enqueue multiple requests.
         */
        if (engine->do_batch_requests) {
                ret = engine->do_batch_requests(engine);
                if (ret)
                        dev_err(engine->dev, "failed to do batch requests: %d\n",
                                ret);
        }

        return;
}

static void crypto_pump_work(struct kthread_work *work)
{
        struct crypto_engine *engine =
                container_of(work, struct crypto_engine, pump_requests);

        crypto_pump_requests(engine, true);
}
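
/*
 * Illustrative sketch (not part of the original file): when the engine was
 * allocated with retry_support, a driver's do_one_request() may return
 * -ENOSPC once its hardware queue is full; crypto_pump_requests() above then
 * requeues the request at the head of the engine queue and retries later.
 * All mydrv_* names are hypothetical.
 *
 *      static int mydrv_do_one_request(struct crypto_engine *engine,
 *                                      void *areq)
 *      {
 *              struct skcipher_request *req =
 *                      container_of(areq, struct skcipher_request, base);
 *              struct mydrv_dev *dd = mydrv_engine_to_dev(engine);
 *
 *              if (mydrv_hw_queue_full(dd))
 *                      return -ENOSPC; // engine requeues and retries
 *
 *              return mydrv_submit(dd, req); // 0 on successful submission
 *      }
 */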

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: indicates whether to queue the pump of requests to kthread_work
 */
static int crypto_transfer_request(struct crypto_engine *engine,
                                   struct crypto_async_request *req,
                                   bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = crypto_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                kthread_queue_work(engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
                                             struct crypto_async_request *req)
{
        return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
                                           struct aead_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
                                               struct akcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
                                           struct ahash_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
                                          struct kpp_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
                                               struct skcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
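
/*
 * Illustrative sketch (not part of the original file): an engine-backed
 * driver's .encrypt/.decrypt entry point usually just forwards the request
 * into the engine queue; the asynchronous status (such as -EINPROGRESS or
 * -EBUSY from the underlying crypto_enqueue_request()) is returned to the
 * caller. All mydrv_* names are hypothetical.
 *
 *      static int mydrv_skcipher_encrypt(struct skcipher_request *req)
 *      {
 *              struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *              struct mydrv_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *              return crypto_transfer_skcipher_request_to_engine(ctx->engine,
 *                                                                req);
 *      }
 */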

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
                                  struct aead_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
                                      struct akcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
                                  struct ahash_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_kpp_request(struct crypto_engine *engine,
                                 struct kpp_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
                                      struct skcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else an error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (engine->running || engine->busy) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -EBUSY;
        }

        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        kthread_queue_work(engine->kworker, &engine->pump_requests);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else an error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
        unsigned long flags;
        unsigned int limit = 500;
        int ret = 0;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /*
         * If the engine queue is not empty or the engine is busy, we need to
         * wait for a while so that the queued requests can be pumped.
         */
        while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                msleep(20);
                spin_lock_irqsave(&engine->queue_lock, flags);
        }

        if (crypto_queue_len(&engine->queue) || engine->busy)
                ret = -EBUSY;
        else
                engine->running = false;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (ret)
                dev_warn(engine->dev, "could not stop engine\n");

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached to the hardware engine
 * @retry_support: whether the hardware supports the retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
                                                       bool retry_support,
                                                       int (*cbk_do_batch)(struct crypto_engine *engine),
                                                       bool rt, int qlen)
{
        struct crypto_engine *engine;

        if (!dev)
                return NULL;

        engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return NULL;

        engine->dev = dev;
        engine->rt = rt;
        engine->running = false;
        engine->busy = false;
        engine->idling = false;
        engine->retry_support = retry_support;
        engine->priv_data = dev;
        /*
         * Batching requests is possible only if
         * the hardware supports the retry mechanism.
         */
        engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

        snprintf(engine->name, sizeof(engine->name),
                 "%s-engine", dev_name(dev));

        crypto_init_queue(&engine->queue, qlen);
        spin_lock_init(&engine->queue_lock);

        engine->kworker = kthread_create_worker(0, "%s", engine->name);
        if (IS_ERR(engine->kworker)) {
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
        kthread_init_work(&engine->pump_requests, crypto_pump_work);

        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
                sched_set_fifo(engine->kworker->task);
        }

        return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
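
/*
 * Illustrative sketch (not part of the original file): probe-time setup with
 * retry support, a batch callback and a deeper queue; mydrv_do_batch and the
 * surrounding driver state are hypothetical.
 *
 *      dd->engine = crypto_engine_alloc_init_and_set(dev, true,
 *                                                    mydrv_do_batch,
 *                                                    false, 128);
 *      if (!dd->engine)
 *              return -ENOMEM;
 *
 *      ret = crypto_engine_start(dd->engine);
 *      if (ret) {
 *              crypto_engine_exit(dd->engine);
 *              return ret;
 *      }
 */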

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
        return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
                                                CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 */
void crypto_engine_exit(struct crypto_engine *engine)
{
        int ret;

        ret = crypto_engine_stop(engine);
        if (ret)
                return;

        kthread_destroy_worker(engine->kworker);
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

int crypto_engine_register_aead(struct aead_engine_alg *alg)
{
        if (!alg->op.do_one_request)
                return -EINVAL;

        alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

        return crypto_register_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aead);

void crypto_engine_unregister_aead(struct aead_engine_alg *alg)
{
        crypto_unregister_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead);

int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_engine_register_aead(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        crypto_engine_unregister_aeads(algs, i);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aeads);

void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_engine_unregister_aead(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads);

int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
{
        if (!alg->op.do_one_request)
                return -EINVAL;

        alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE;

        return crypto_register_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);

void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg)
{
        crypto_unregister_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash);

int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_engine_register_ahash(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        crypto_engine_unregister_ahashes(algs, i);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes);

void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
                                      int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_engine_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes);

int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
{
        if (!alg->op.do_one_request)
                return -EINVAL;

        alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

        return crypto_register_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);

void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg)
{
        crypto_unregister_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher);

int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
{
        if (!alg->op.do_one_request)
                return -EINVAL;

        alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

        return crypto_register_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);

void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg)
{
        crypto_unregister_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp);

int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
{
        if (!alg->op.do_one_request)
                return -EINVAL;

        alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

        return crypto_register_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);

void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg)
{
        return crypto_unregister_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher);

int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
                                     int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_engine_register_skcipher(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        crypto_engine_unregister_skciphers(algs, i);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers);

void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
                                        int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_engine_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers);
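
/*
 * Illustrative sketch (not part of the original file): an engine algorithm
 * pairs the usual algorithm definition (base) with the engine op that does
 * the actual work, and is registered with the helpers above. All mydrv_*
 * names are hypothetical.
 *
 *      static struct skcipher_engine_alg mydrv_algs[] = {
 *              {
 *                      .base.base.cra_name        = "cbc(aes)",
 *                      .base.base.cra_driver_name = "mydrv-cbc-aes",
 *                      .base.setkey               = mydrv_setkey,
 *                      .base.encrypt              = mydrv_skcipher_encrypt,
 *                      .base.decrypt              = mydrv_skcipher_decrypt,
 *                      .op.do_one_request         = mydrv_do_one_request,
 *              },
 *      };
 *
 *      // In probe, after crypto_engine_start():
 *      ret = crypto_engine_register_skciphers(mydrv_algs,
 *                                             ARRAY_SIZE(mydrv_algs));
 */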

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");