crypto_engine.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <[email protected]>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
                                    struct crypto_async_request *req, int err)
{
        unsigned long flags;
        bool finalize_req = false;
        int ret;
        struct crypto_engine_ctx *enginectx;

        /*
         * If hardware cannot enqueue more requests
         * and the retry mechanism is not supported,
         * make sure we are completing the current request.
         */
        if (!engine->retry_support) {
                spin_lock_irqsave(&engine->queue_lock, flags);
                if (engine->cur_req == req) {
                        finalize_req = true;
                        engine->cur_req = NULL;
                }
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        if (finalize_req || engine->retry_support) {
                enginectx = crypto_tfm_ctx(req->tfm);
                if (enginectx->op.prepare_request &&
                    enginectx->op.unprepare_request) {
                        ret = enginectx->op.unprepare_request(engine, req);
                        if (ret)
                                dev_err(engine->dev, "failed to unprepare request\n");
                }
        }

        lockdep_assert_in_softirq();
        crypto_request_complete(req, err);

        kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
{
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;
        bool was_busy = false;
        int ret;
        struct crypto_engine_ctx *enginectx;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /* Make sure we are not already running a request */
        if (!engine->retry_support && engine->cur_req)
                goto out;

        /* If another context is idling then defer */
        if (engine->idling) {
                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        /* Check if the engine queue is idle */
        if (!crypto_queue_len(&engine->queue) || !engine->running) {
                if (!engine->busy)
                        goto out;

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        kthread_queue_work(engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }

                engine->busy = false;
                engine->idling = true;
                spin_unlock_irqrestore(&engine->queue_lock, flags);

                if (engine->unprepare_crypt_hardware &&
                    engine->unprepare_crypt_hardware(engine))
                        dev_err(engine->dev, "failed to unprepare crypt hardware\n");

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->idling = false;
                goto out;
        }

start_request:
        /* Get the first request from the engine queue to handle */
        backlog = crypto_get_backlog(&engine->queue);
        async_req = crypto_dequeue_request(&engine->queue);
        if (!async_req)
                goto out;

        /*
         * If hardware doesn't support the retry mechanism,
         * keep track of the request we are processing now.
         * We'll need it on completion (crypto_finalize_request).
         */
        if (!engine->retry_support)
                engine->cur_req = async_req;

        if (engine->busy)
                was_busy = true;
        else
                engine->busy = true;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /* At this point we have successfully obtained a request to process */
        if (!was_busy && engine->prepare_crypt_hardware) {
                ret = engine->prepare_crypt_hardware(engine);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare crypt hardware\n");
                        goto req_err_2;
                }
        }

        enginectx = crypto_tfm_ctx(async_req->tfm);

        if (enginectx->op.prepare_request) {
                ret = enginectx->op.prepare_request(engine, async_req);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare request: %d\n",
                                ret);
                        goto req_err_2;
                }
        }
        if (!enginectx->op.do_one_request) {
                dev_err(engine->dev, "failed to do request\n");
                ret = -EINVAL;
                goto req_err_1;
        }

        ret = enginectx->op.do_one_request(engine, async_req);

        /* Request unsuccessfully executed by hardware */
        if (ret < 0) {
                /*
                 * If hardware queue is full (-ENOSPC), requeue request
                 * regardless of backlog flag.
                 * Otherwise, unprepare and complete the request.
                 */
                if (!engine->retry_support ||
                    (ret != -ENOSPC)) {
                        dev_err(engine->dev,
                                "Failed to do one request from queue: %d\n",
                                ret);
                        goto req_err_1;
                }
                /*
                 * If the retry mechanism is supported,
                 * unprepare the current request and
                 * enqueue it back into the crypto-engine queue.
                 */
                if (enginectx->op.unprepare_request) {
                        ret = enginectx->op.unprepare_request(engine,
                                                              async_req);
                        if (ret)
                                dev_err(engine->dev,
                                        "failed to unprepare request\n");
                }
                spin_lock_irqsave(&engine->queue_lock, flags);
                /*
                 * If hardware was unable to execute the request, enqueue it
                 * back in front of the crypto-engine queue, to keep the order
                 * of requests.
                 */
                crypto_enqueue_request_head(&engine->queue, async_req);

                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        goto retry;

req_err_1:
        if (enginectx->op.unprepare_request) {
                ret = enginectx->op.unprepare_request(engine, async_req);
                if (ret)
                        dev_err(engine->dev, "failed to unprepare request\n");
        }

req_err_2:
        crypto_request_complete(async_req, ret);

retry:
        if (backlog)
                crypto_request_complete(backlog, -EINPROGRESS);

        /* If the retry mechanism is supported, send new requests to the engine */
        if (engine->retry_support) {
                spin_lock_irqsave(&engine->queue_lock, flags);
                goto start_request;
        }
        return;

out:
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /*
         * Batch requests is possible only if
         * hardware can enqueue multiple requests
         */
        if (engine->do_batch_requests) {
                ret = engine->do_batch_requests(engine);
                if (ret)
                        dev_err(engine->dev, "failed to do batch requests: %d\n",
                                ret);
        }

        return;
}

static void crypto_pump_work(struct kthread_work *work)
{
        struct crypto_engine *engine =
                container_of(work, struct crypto_engine, pump_requests);

        crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: indicates whether to queue the request pump to kthread_work
 */
static int crypto_transfer_request(struct crypto_engine *engine,
                                   struct crypto_async_request *req,
                                   bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = crypto_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                kthread_queue_work(engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request to list
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
                                             struct crypto_async_request *req)
{
        return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * to list into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
                                           struct aead_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * to list into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
                                               struct akcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * to list into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
                                           struct ahash_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request to list
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
                                          struct kpp_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * to list into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
                                               struct skcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);

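/*
 * Illustrative sketch (not part of this file's build): a hardware driver
 * typically calls the crypto_transfer_*_request_to_engine() helpers from its
 * algorithm entry points so that the engine queue serializes access to the
 * hardware. The driver context and field names below (my_driver_ctx,
 * ctx->engine) are hypothetical.
 */
#if 0
static int my_skcipher_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct my_driver_ctx *ctx = crypto_skcipher_ctx(tfm);

        /* Queue the request; the engine pump later calls do_one_request() */
        return crypto_transfer_skcipher_request_to_engine(ctx->engine, req);
}
#endif
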
/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
                                  struct aead_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
                                      struct akcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
                                  struct ahash_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_kpp_request(struct crypto_engine *engine,
                                 struct kpp_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
                                      struct skcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);

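/*
 * Illustrative sketch (not part of this file's build): once the hardware
 * signals completion, the driver reports the result back with the matching
 * crypto_finalize_*_request() helper, which completes the request and pumps
 * the next one. The names (my_device, dev->engine, dev->cur_req) are
 * hypothetical.
 */
#if 0
static void my_skcipher_done(struct my_device *dev, int err)
{
        struct skcipher_request *req = dev->cur_req;

        /* Completes req and kicks the engine to process the next request */
        crypto_finalize_skcipher_request(dev->engine, req, err);
}
#endif
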
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (engine->running || engine->busy) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -EBUSY;
        }

        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        kthread_queue_work(engine->kworker, &engine->pump_requests);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
        unsigned long flags;
        unsigned int limit = 500;
        int ret = 0;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /*
         * If the engine queue is not empty or the engine is in a busy state,
         * we need to wait a while for the queued requests to be pumped.
         */
        while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                msleep(20);
                spin_lock_irqsave(&engine->queue_lock, flags);
        }

        if (crypto_queue_len(&engine->queue) || engine->busy)
                ret = -EBUSY;
        else
                engine->running = false;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (ret)
                dev_warn(engine->dev, "could not stop engine\n");

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached with one hardware engine
 * @retry_support: whether hardware has support for retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                @engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
                                                       bool retry_support,
                                                       int (*cbk_do_batch)(struct crypto_engine *engine),
                                                       bool rt, int qlen)
{
        struct crypto_engine *engine;

        if (!dev)
                return NULL;

        engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return NULL;

        engine->dev = dev;
        engine->rt = rt;
        engine->running = false;
        engine->busy = false;
        engine->idling = false;
        engine->retry_support = retry_support;
        engine->priv_data = dev;
        /*
         * Batch requests is possible only if
         * hardware has support for retry mechanism.
         */
        engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

        snprintf(engine->name, sizeof(engine->name),
                 "%s-engine", dev_name(dev));

        crypto_init_queue(&engine->queue, qlen);
        spin_lock_init(&engine->queue_lock);

        engine->kworker = kthread_create_worker(0, "%s", engine->name);
        if (IS_ERR(engine->kworker)) {
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
        kthread_init_work(&engine->pump_requests, crypto_pump_work);

        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
                sched_set_fifo(engine->kworker->task);
        }

        return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);

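/*
 * Illustrative sketch (not part of this file's build): a typical probe() path
 * allocates the engine and starts it before registering its algorithms. The
 * structure and field names (my_device, dev->dev, dev->engine) are
 * hypothetical.
 */
#if 0
static int my_probe_engine(struct my_device *dev)
{
        /* No retry support, no batching, non-realtime pump, default queue */
        dev->engine = crypto_engine_alloc_init_and_set(dev->dev, false, NULL,
                                                       false,
                                                       CRYPTO_ENGINE_MAX_QLEN);
        if (!dev->engine)
                return -ENOMEM;

        /* Let the pump kworker start dequeuing requests */
        return crypto_engine_start(dev->engine);
}
#endif
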
/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached with one hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
        return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
                                                CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
        int ret;

        ret = crypto_engine_stop(engine);
        if (ret)
                return ret;

        kthread_destroy_worker(engine->kworker);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

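/*
 * Illustrative sketch (not part of this file's build): the matching remove()
 * path drains and stops the engine before the device goes away. The names
 * (my_device, dev->dev, dev->engine) are hypothetical.
 */
#if 0
static void my_remove_engine(struct my_device *dev)
{
        /* Waits for the queue to drain, then destroys the pump kworker */
        if (crypto_engine_exit(dev->engine))
                dev_warn(dev->dev, "crypto engine still busy at remove time\n");
}
#endif
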
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");