sec_algs.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017 HiSilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

#include "sec_drv.h"

#define SEC_MAX_CIPHER_KEY		64
#define SEC_REQ_LIMIT SZ_32M

struct sec_c_alg_cfg {
	unsigned c_alg		: 3;
	unsigned c_mode		: 3;
	unsigned key_len	: 2;
	unsigned c_width	: 2;
};

static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = {
	[SEC_C_DES_ECB_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_DES_CBC_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_3DES_ECB_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_ECB_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_3DES_CBC_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_CBC_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_AES_ECB_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_ECB_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_ECB_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CBC_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CBC_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CBC_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CTR_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CTR_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CTR_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_XTS_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_XTS_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_NULL] = {
	},
};

/*
 * Mutex used to ensure safe operation of reference count of
 * alg providers
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

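/*
 * Pre-fill a hardware BD template for the chosen cipher: mode, algorithm,
 * key length and width come from the static config table above, and the
 * DMA address of the per-tfm key buffer is written once here so each
 * request only has to patch in the per-request fields.
 */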
static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
					   struct sec_bd_info *req,
					   enum sec_cipher_alg alg)
{
	const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];

	memset(req, 0, sizeof(*req));
	req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
	req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
	req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
	req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;

	req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
	req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
}

static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
					  const u8 *key,
					  unsigned int keylen,
					  enum sec_cipher_alg alg)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cipher_alg = alg;
	memcpy(ctx->key, key, keylen);
	sec_alg_skcipher_init_template(ctx, &ctx->req_template,
				       ctx->cipher_alg);
}

static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
			    dma_addr_t psec_sgl, struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current, *sgl_next;
	dma_addr_t sgl_next_dma;

	sgl_current = hw_sgl;
	while (sgl_current) {
		sgl_next = sgl_current->next;
		sgl_next_dma = sgl_current->next_sgl;

		dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);

		sgl_current = sgl_next;
		psec_sgl = sgl_next_dma;
	}
}

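/*
 * Build the hardware scatter-gather list the engine consumes from an
 * already DMA-mapped scatterlist.  Elements come from the device's DMA
 * pool and hold up to SEC_MAX_SGE_NUM entries each; when one fills up a
 * new element is allocated and chained via next/next_sgl.
 */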
static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
				     dma_addr_t *psec_sgl,
				     struct scatterlist *sgl,
				     int count,
				     struct sec_dev_info *info,
				     gfp_t gfp)
{
	struct sec_hw_sgl *sgl_current = NULL;
	struct sec_hw_sgl *sgl_next;
	dma_addr_t sgl_next_dma;
	struct scatterlist *sg;
	int ret, sge_index, i;

	if (!count)
		return -EINVAL;

	for_each_sg(sgl, sg, count, i) {
		sge_index = i % SEC_MAX_SGE_NUM;
		if (sge_index == 0) {
			sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
						   gfp, &sgl_next_dma);
			if (!sgl_next) {
				ret = -ENOMEM;
				goto err_free_hw_sgls;
			}

			if (!sgl_current) { /* First one */
				*psec_sgl = sgl_next_dma;
				*sec_sgl = sgl_next;
			} else { /* Chained */
				sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
				sgl_current->next_sgl = sgl_next_dma;
				sgl_current->next = sgl_next;
			}
			sgl_current = sgl_next;
		}
		sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
		sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
		sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
	}
	sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
	sgl_current->next_sgl = 0;
	(*sec_sgl)->entry_sum_in_chain = count;

	return 0;

err_free_hw_sgls:
	sec_free_hw_sgl(*sec_sgl, *psec_sgl, info);
	*psec_sgl = 0;

	return ret;
}

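/*
 * The cipher key lives in a DMA-coherent buffer so the template BD can
 * reference it directly.  It is allocated lazily on the first setkey and
 * simply cleared and reused on rekey; it is freed in the tfm exit path.
 */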
static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   enum sec_cipher_alg alg)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	mutex_lock(&ctx->lock);
	if (ctx->key) {
		/* rekeying */
		memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
	} else {
		/* new key */
		ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
					      &ctx->pkey, GFP_KERNEL);
		if (!ctx->key) {
			mutex_unlock(&ctx->lock);
			return -ENOMEM;
		}
	}
	mutex_unlock(&ctx->lock);
	sec_alg_skcipher_init_context(tfm, key, keylen, alg);

	return 0;
}

static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_ECB_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_ECB_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_ECB_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CBC_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CBC_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CBC_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CTR_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CTR_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CTR_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case AES_KEYSIZE_128 * 2:
		alg = SEC_C_AES_XTS_128;
		break;
	case AES_KEYSIZE_256 * 2:
		alg = SEC_C_AES_XTS_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
}

static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
}

static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_ECB_192_3KEY);
}

static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_CBC_192_3KEY);
}

static void sec_alg_free_el(struct sec_request_el *el,
			    struct sec_dev_info *info)
{
	sec_free_hw_sgl(el->out, el->dma_out, info);
	sec_free_hw_sgl(el->in, el->dma_in, info);
	kfree(el->sgl_in);
	kfree(el->sgl_out);
	kfree(el);
}

/* queuelock must be held */
static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
{
	struct sec_request_el *el, *temp;
	int ret = 0;

	mutex_lock(&sec_req->lock);
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		/*
		 * Add to hardware queue only under following circumstances
		 * 1) Software and hardware queue empty so no chain dependencies
		 * 2) No dependencies as new IV - (check software queue empty
		 *    to maintain order)
		 * 3) No dependencies because the mode does no chaining.
		 *
		 * In other cases first insert onto the software queue which
		 * is then emptied as requests complete
		 */
		if (!queue->havesoftqueue ||
		    (kfifo_is_empty(&queue->softqueue) &&
		     sec_queue_empty(queue))) {
			ret = sec_queue_send(queue, &el->req, sec_req);
			if (ret == -EAGAIN) {
				/* Wait until we can send then try again */
				/* DEAD if here - should not happen */
				ret = -EBUSY;
				goto err_unlock;
			}
		} else {
			kfifo_put(&queue->softqueue, el);
		}
	}
err_unlock:
	mutex_unlock(&sec_req->lock);

	return ret;
}

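/*
 * Per-element completion handler, called from the queue completion path.
 * It saves the next IV for chained modes (CBC copies the last block of
 * the processed data back into skreq->iv, CTR increments the counter),
 * pushes any pending soft-queue or backlog work into the now-free
 * hardware queue, frees the finished element and, once the last element
 * of the request is done, unmaps the IV and data scatterlists and
 * completes the skcipher request.
 */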
static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
				      struct crypto_async_request *req_base)
{
	struct skcipher_request *skreq = container_of(req_base,
						      struct skcipher_request,
						      base);
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_request *backlog_req;
	struct sec_request_el *sec_req_el, *nextrequest;
	struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct device *dev = ctx->queue->dev_info->dev;
	int icv_or_skey_en, ret;
	bool done;

	sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
				      head);
	icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
		SEC_BD_W0_ICV_OR_SKEY_EN_S;
	if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
		dev_err(dev, "Got an invalid answer %lu %d\n",
			sec_resp->w1 & SEC_BD_W1_BD_INVALID,
			icv_or_skey_en);
		sec_req->err = -EINVAL;
		/*
		 * We need to muddle on to avoid getting stuck with elements
		 * on the queue. The error will be reported to the requester
		 * so it should be able to handle it appropriately.
		 */
	}

	spin_lock_bh(&ctx->queue->queuelock);
	/* Put the IV in place for chained cases */
	switch (ctx->cipher_alg) {
	case SEC_C_AES_CBC_128:
	case SEC_C_AES_CBC_192:
	case SEC_C_AES_CBC_256:
		if (sec_req_el->req.w0 & SEC_BD_W0_DE)
			sg_pcopy_to_buffer(sec_req_el->sgl_out,
					   sg_nents(sec_req_el->sgl_out),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		else
			sg_pcopy_to_buffer(sec_req_el->sgl_in,
					   sg_nents(sec_req_el->sgl_in),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		/* No need to sync to the device as coherent DMA */
		break;
	case SEC_C_AES_CTR_128:
	case SEC_C_AES_CTR_192:
	case SEC_C_AES_CTR_256:
		crypto_inc(skreq->iv, 16);
		break;
	default:
		/* Do not update */
		break;
	}

	if (ctx->queue->havesoftqueue &&
	    !kfifo_is_empty(&ctx->queue->softqueue) &&
	    sec_queue_empty(ctx->queue)) {
		ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
		if (ret <= 0)
			dev_err(dev,
				"Error getting next element from kfifo %d\n",
				ret);
		else
			/* We know there is space so this cannot fail */
			sec_queue_send(ctx->queue, &nextrequest->req,
				       nextrequest->sec_req);
	} else if (!list_empty(&ctx->backlog)) {
		/* Need to verify there is room first */
		backlog_req = list_first_entry(&ctx->backlog,
					       typeof(*backlog_req),
					       backlog_head);
		if (sec_queue_can_enqueue(ctx->queue,
					  backlog_req->num_elements) ||
		    (ctx->queue->havesoftqueue &&
		     kfifo_avail(&ctx->queue->softqueue) >
		     backlog_req->num_elements)) {
			sec_send_request(backlog_req, ctx->queue);
			backlog_req->req_base->complete(backlog_req->req_base,
							-EINPROGRESS);
			list_del(&backlog_req->backlog_head);
		}
	}
	spin_unlock_bh(&ctx->queue->queuelock);

	mutex_lock(&sec_req->lock);
	list_del(&sec_req_el->head);
	mutex_unlock(&sec_req->lock);
	sec_alg_free_el(sec_req_el, ctx->queue->dev_info);

	/*
	 * Request is done.
	 * The dance is needed as the lock is freed in the completion
	 */
	mutex_lock(&sec_req->lock);
	done = list_empty(&sec_req->elements);
	mutex_unlock(&sec_req->lock);
	if (done) {
		if (crypto_skcipher_ivsize(atfm)) {
			dma_unmap_single(dev, sec_req->dma_iv,
					 crypto_skcipher_ivsize(atfm),
					 DMA_TO_DEVICE);
		}
		dma_unmap_sg(dev, skreq->src, sec_req->len_in,
			     DMA_BIDIRECTIONAL);
		if (skreq->src != skreq->dst)
			dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
				     DMA_BIDIRECTIONAL);
		skreq->base.complete(&skreq->base, sec_req->err);
	}
}

void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
{
	struct sec_request *sec_req = shadow;

	sec_req->cb(resp, sec_req->req_base);
}

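/*
 * Requests larger than SEC_REQ_LIMIT are broken into SEC_REQ_LIMIT sized
 * chunks with the remainder in the final entry; one request element is
 * built per chunk.
 */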
static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
					      int *steps, gfp_t gfp)
{
	size_t *sizes;
	int i;

	/* Split into suitable sized blocks */
	*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
	sizes = kcalloc(*steps, sizeof(*sizes), gfp);
	if (!sizes)
		return -ENOMEM;

	for (i = 0; i < *steps - 1; i++)
		sizes[i] = SEC_REQ_LIMIT;
	sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
	*split_sizes = sizes;

	return 0;
}

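/*
 * DMA map the caller's scatterlist and carve it into one scatterlist per
 * step using sg_split(), so each request element gets its own mapped
 * chunk.  On failure everything allocated or mapped here is undone.
 */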
static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
				int steps, struct scatterlist ***splits,
				int **splits_nents,
				int sgl_len_in,
				struct device *dev, gfp_t gfp)
{
	int ret, count;

	count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	*splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
	if (!*splits) {
		ret = -ENOMEM;
		goto err_unmap_sg;
	}
	*splits_nents = kcalloc(steps, sizeof(int), gfp);
	if (!*splits_nents) {
		ret = -ENOMEM;
		goto err_free_splits;
	}

	/* output the scatter list before and after this */
	ret = sg_split(sgl, count, 0, steps, split_sizes,
		       *splits, *splits_nents, gfp);
	if (ret) {
		ret = -ENOMEM;
		goto err_free_splits_nents;
	}

	return 0;

err_free_splits_nents:
	kfree(*splits_nents);
err_free_splits:
	kfree(*splits);
err_unmap_sg:
	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);

	return ret;
}

/*
 * Reverses the sec_map_and_split_sg call for messages not yet added to
 * the queues.
 */
static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
				struct scatterlist **splits, int *splits_nents,
				int sgl_len_in, struct device *dev)
{
	int i;

	for (i = 0; i < steps; i++)
		kfree(splits[i]);
	kfree(splits_nents);
	kfree(splits);

	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
}

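/*
 * Build one request element: copy the per-tfm BD template, set the
 * encrypt/decrypt direction and the granule size fields for this chunk,
 * and attach hardware SGLs for the input and, when the destination
 * differs, the output.
 */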
static struct sec_request_el
*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
			   int el_size, bool different_dest,
			   struct scatterlist *sgl_in, int n_ents_in,
			   struct scatterlist *sgl_out, int n_ents_out,
			   struct sec_dev_info *info, gfp_t gfp)
{
	struct sec_request_el *el;
	struct sec_bd_info *req;
	int ret;

	el = kzalloc(sizeof(*el), gfp);
	if (!el)
		return ERR_PTR(-ENOMEM);
	el->el_length = el_size;
	req = &el->req;
	memcpy(req, template, sizeof(*req));

	req->w0 &= ~SEC_BD_W0_CIPHER_M;
	if (encrypt)
		req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
	else
		req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
	req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
		SEC_BD_W0_C_GRAN_SIZE_19_16_M;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
	req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
		SEC_BD_W0_C_GRAN_SIZE_21_20_M;

	/* Writing whole u32 so no need to take care of masking */
	req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
		((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
		 SEC_BD_W2_C_GRAN_SIZE_15_0_M);

	req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
	req->w1 |= SEC_BD_W1_ADDR_TYPE;

	el->sgl_in = sgl_in;

	ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
					n_ents_in, info, gfp);
	if (ret)
		goto err_free_el;

	req->data_addr_lo = lower_32_bits(el->dma_in);
	req->data_addr_hi = upper_32_bits(el->dma_in);

	if (different_dest) {
		el->sgl_out = sgl_out;
		ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
						el->sgl_out,
						n_ents_out, info, gfp);
		if (ret)
			goto err_free_hw_sgl_in;

		req->w0 |= SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);
	} else {
		req->w0 &= ~SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
	}

	return el;

err_free_hw_sgl_in:
	sec_free_hw_sgl(el->in, el->dma_in, info);
err_free_el:
	kfree(el);

	return ERR_PTR(ret);
}

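/*
 * Main request path shared by encrypt and decrypt: split the request
 * into SEC_REQ_LIMIT sized elements, map the IV and data, then queue all
 * elements atomically - either to the hardware/software queue or, if
 * CRYPTO_TFM_REQ_MAY_BACKLOG is set and there is no room, to the per-tfm
 * backlog which is drained from the completion callback.
 */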
static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
				   bool encrypt)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_queue *queue = ctx->queue;
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_dev_info *info = queue->dev_info;
	int i, ret, steps;
	size_t *split_sizes;
	struct scatterlist **splits_in;
	struct scatterlist **splits_out = NULL;
	int *splits_in_nents;
	int *splits_out_nents = NULL;
	struct sec_request_el *el, *temp;
	bool split = skreq->src != skreq->dst;
	gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	mutex_init(&sec_req->lock);
	sec_req->req_base = &skreq->base;
	sec_req->err = 0;
	/* SGL mapping out here to allow us to break it up as necessary */
	sec_req->len_in = sg_nents(skreq->src);

	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
						 &steps, gfp);
	if (ret)
		return ret;
	sec_req->num_elements = steps;
	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
				   &splits_in_nents, sec_req->len_in,
				   info->dev, gfp);
	if (ret)
		goto err_free_split_sizes;

	if (split) {
		sec_req->len_out = sg_nents(skreq->dst);
		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
					   &splits_out, &splits_out_nents,
					   sec_req->len_out, info->dev, gfp);
		if (ret)
			goto err_unmap_in_sg;
	}
	/* Shared info stored in seq_req - applies to all BDs */
	sec_req->tfm_ctx = ctx;
	sec_req->cb = sec_skcipher_alg_callback;
	INIT_LIST_HEAD(&sec_req->elements);

	/*
	 * Future optimization.
	 * In the chaining case we can't use a dma pool bounce buffer
	 * but in the case where we know there is no chaining we can
	 */
	if (crypto_skcipher_ivsize(atfm)) {
		sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
						 crypto_skcipher_ivsize(atfm),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
			ret = -ENOMEM;
			goto err_unmap_out_sg;
		}
	}

	/* Set them all up then queue - cleaner error handling. */
	for (i = 0; i < steps; i++) {
		el = sec_alg_alloc_and_fill_el(&ctx->req_template,
					       encrypt ? 1 : 0,
					       split_sizes[i],
					       skreq->src != skreq->dst,
					       splits_in[i], splits_in_nents[i],
					       split ? splits_out[i] : NULL,
					       split ? splits_out_nents[i] : 0,
					       info, gfp);
		if (IS_ERR(el)) {
			ret = PTR_ERR(el);
			goto err_free_elements;
		}
		el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
		el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
		el->sec_req = sec_req;
		list_add_tail(&el->head, &sec_req->elements);
	}

	/*
	 * Only attempt to queue if the whole lot can fit in the queue -
	 * we can't successfully cleanup after a partial queuing so this
	 * must succeed or fail atomically.
	 *
	 * Big hammer test of both software and hardware queues - could be
	 * more refined but this is unlikely to happen so no need.
	 */

	/* Grab a big lock for a long time to avoid concurrency issues */
	spin_lock_bh(&queue->queuelock);

	/*
	 * Can go on to queue if we have space in either:
	 * 1) The hardware queue and no software queue
	 * 2) The software queue
	 * AND there is nothing in the backlog.  If there is backlog we
	 * have to only queue to the backlog queue and return busy.
	 */
	if ((!sec_queue_can_enqueue(queue, steps) &&
	     (!queue->havesoftqueue ||
	      kfifo_avail(&queue->softqueue) > steps)) ||
	    !list_empty(&ctx->backlog)) {
		ret = -EBUSY;
		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
			spin_unlock_bh(&queue->queuelock);
			goto out;
		}

		spin_unlock_bh(&queue->queuelock);
		goto err_free_elements;
	}
	ret = sec_send_request(sec_req, queue);
	spin_unlock_bh(&queue->queuelock);
	if (ret)
		goto err_free_elements;

	ret = -EINPROGRESS;
out:
	/* Cleanup - all elements in pointer arrays have been copied */
	kfree(splits_in_nents);
	kfree(splits_in);
	kfree(splits_out_nents);
	kfree(splits_out);
	kfree(split_sizes);

	return ret;

err_free_elements:
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		list_del(&el->head);
		sec_alg_free_el(el, info);
	}
	if (crypto_skcipher_ivsize(atfm))
		dma_unmap_single(info->dev, sec_req->dma_iv,
				 crypto_skcipher_ivsize(atfm),
				 DMA_BIDIRECTIONAL);
err_unmap_out_sg:
	if (split)
		sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
				    splits_out_nents, sec_req->len_out,
				    info->dev);
err_unmap_in_sg:
	sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
			    sec_req->len_in, info->dev);
err_free_split_sizes:
	kfree(split_sizes);

	return ret;
}

static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, true);
}

static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, false);
}

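/*
 * Each tfm grabs its own hardware queue for its lifetime.  Chained modes
 * (CBC and CTR) additionally attach a software kfifo, used to keep
 * elements in order when the hardware queue cannot take them yet.
 */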
static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	mutex_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->backlog);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));

	ctx->queue = sec_queue_alloc_start_safe();
	if (IS_ERR(ctx->queue))
		return PTR_ERR(ctx->queue);

	spin_lock_init(&ctx->queue->queuelock);
	ctx->queue->havesoftqueue = false;

	return 0;
}

static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	if (ctx->key) {
		memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
		dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
				  ctx->pkey);
	}
	sec_queue_stop_release(ctx->queue);
}

static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_alg_skcipher_init(tfm);
	if (ret)
		return ret;

	INIT_KFIFO(ctx->queue->softqueue);
	ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
	if (ret) {
		sec_alg_skcipher_exit(tfm);
		return ret;
	}
	ctx->queue->havesoftqueue = true;

	return 0;
}

static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	kfifo_free(&ctx->queue->softqueue);
	sec_alg_skcipher_exit(tfm);
}

static struct skcipher_alg sec_algs[] = {
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "hisi_sec_aes_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "hisi_sec_aes_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "hisi_sec_aes_ctr",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_ctr,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "hisi_sec_aes_xts",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_xts,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = 2 * AES_MIN_KEY_SIZE,
		.max_keysize = 2 * AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		/* Unable to find any test vectors so untested */
		.base = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "hisi_sec_des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "hisi_sec_des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_3des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_3des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = 0,
	}
};

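/*
 * Algorithms are registered once when the first SEC device comes up and
 * unregistered when the last one goes away; active_devs keeps the
 * reference count under algs_lock.
 */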
int sec_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
	if (ret)
		--active_devs;
unlock:
	mutex_unlock(&algs_lock);

	return ret;
}

void sec_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));

unlock:
	mutex_unlock(&algs_lock);
}