caamhash.c

  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * caam - Freescale FSL CAAM support for ahash functions of crypto API
  4. *
  5. * Copyright 2011 Freescale Semiconductor, Inc.
  6. * Copyright 2018-2019 NXP
  7. *
  8. * Based on caamalg.c crypto API driver.
  9. *
  10. * relationship of digest job descriptor or first job descriptor after init to
  11. * shared descriptors:
  12. *
  13. * ---------------                     ---------------
  14. * | JobDesc #1  |-------------------->|  ShareDesc  |
  15. * | *(packet 1) |                     |  (hashKey)  |
  16. * ---------------                     | (operation) |
  17. *                                     ---------------
  18. *
  19. * relationship of subsequent job descriptors to shared descriptors:
  20. *
  21. * ---------------                     ---------------
  22. * | JobDesc #2  |-------------------->|  ShareDesc  |
  23. * | *(packet 2) |      |------------->|  (hashKey)  |
  24. * ---------------      |    |-------->| (operation) |
  25. *       .              |    |         | (load ctx2) |
  26. *       .              |    |         ---------------
  27. * ---------------      |    |
  28. * | JobDesc #3  |------|    |
  29. * | *(packet 3) |           |
  30. * ---------------           |
  31. *       .                   |
  32. *       .                   |
  33. * ---------------           |
  34. * | JobDesc #4  |------------
  35. * | *(packet 4) |
  36. * ---------------
  37. *
  38. * The SharedDesc never changes for a connection unless rekeyed, but
  39. * each packet will likely be in a different place. So all we need
  40. * to know to process the packet is where the input is, where the
  41. * output goes, and what context we want to process with. Context is
  42. * in the SharedDesc, packet references in the JobDesc.
  43. *
  44. * So, a job desc looks like:
  45. *
  46. * ---------------------
  47. * | Header            |
  48. * | ShareDesc Pointer |
  49. * | SEQ_OUT_PTR       |
  50. * | (output buffer)   |
  51. * | (output length)   |
  52. * | SEQ_IN_PTR        |
  53. * | (input buffer)    |
  54. * | (input length)    |
  55. * ---------------------
  56. */
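/*
 * A minimal sketch, assuming placeholder DMA addresses and lengths, of how
 * such a job descriptor is built with the desc_constr helpers used below
 * (same layout as the diagram above):
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
 *	append_seq_in_ptr(desc, src_dma, src_len, 0);
 */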
  57. #include "compat.h"
  58. #include "regs.h"
  59. #include "intern.h"
  60. #include "desc_constr.h"
  61. #include "jr.h"
  62. #include "error.h"
  63. #include "sg_sw_sec4.h"
  64. #include "key_gen.h"
  65. #include "caamhash_desc.h"
  66. #include <crypto/engine.h>
  67. #define CAAM_CRA_PRIORITY 3000
  68. /* max hash key is max split key size */
  69. #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
  70. #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
  71. #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
  72. #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
  73. CAAM_MAX_HASH_KEY_SIZE)
  74. #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
  75. /* caam context sizes for hashes: running digest + 8 */
  76. #define HASH_MSG_LEN 8
  77. #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
  78. static struct list_head hash_list;
  79. /* ahash per-session context */
  80. struct caam_hash_ctx {
  81. struct crypto_engine_ctx enginectx;
  82. u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
  83. u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
  84. u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
  85. u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
  86. u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
  87. dma_addr_t sh_desc_update_dma ____cacheline_aligned;
  88. dma_addr_t sh_desc_update_first_dma;
  89. dma_addr_t sh_desc_fin_dma;
  90. dma_addr_t sh_desc_digest_dma;
  91. enum dma_data_direction dir;
  92. enum dma_data_direction key_dir;
  93. struct device *jrdev;
  94. int ctx_len;
  95. struct alginfo adata;
  96. };
  97. /* ahash state */
  98. struct caam_hash_state {
  99. dma_addr_t buf_dma;
  100. dma_addr_t ctx_dma;
  101. int ctx_dma_len;
  102. u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
  103. int buflen;
  104. int next_buflen;
  105. u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
  106. int (*update)(struct ahash_request *req) ____cacheline_aligned;
  107. int (*final)(struct ahash_request *req);
  108. int (*finup)(struct ahash_request *req);
  109. struct ahash_edesc *edesc;
  110. void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
  111. void *context);
  112. };
  113. struct caam_export_state {
  114. u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
  115. u8 caam_ctx[MAX_CTX_LEN];
  116. int buflen;
  117. int (*update)(struct ahash_request *req);
  118. int (*final)(struct ahash_request *req);
  119. int (*finup)(struct ahash_request *req);
  120. };
  121. static inline bool is_cmac_aes(u32 algtype)
  122. {
  123. return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
  124. (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
  125. }
  126. /* Common job descriptor seq in/out ptr routines */
  127. /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
  128. static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
  129. struct caam_hash_state *state,
  130. int ctx_len)
  131. {
  132. state->ctx_dma_len = ctx_len;
  133. state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
  134. ctx_len, DMA_FROM_DEVICE);
  135. if (dma_mapping_error(jrdev, state->ctx_dma)) {
  136. dev_err(jrdev, "unable to map ctx\n");
  137. state->ctx_dma = 0;
  138. return -ENOMEM;
  139. }
  140. append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
  141. return 0;
  142. }
  143. /* Map current buffer in state (if length > 0) and put it in link table */
  144. static inline int buf_map_to_sec4_sg(struct device *jrdev,
  145. struct sec4_sg_entry *sec4_sg,
  146. struct caam_hash_state *state)
  147. {
  148. int buflen = state->buflen;
  149. if (!buflen)
  150. return 0;
  151. state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
  152. DMA_TO_DEVICE);
  153. if (dma_mapping_error(jrdev, state->buf_dma)) {
  154. dev_err(jrdev, "unable to map buf\n");
  155. state->buf_dma = 0;
  156. return -ENOMEM;
  157. }
  158. dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
  159. return 0;
  160. }
  161. /* Map state->caam_ctx, and add it to link table */
  162. static inline int ctx_map_to_sec4_sg(struct device *jrdev,
  163. struct caam_hash_state *state, int ctx_len,
  164. struct sec4_sg_entry *sec4_sg, u32 flag)
  165. {
  166. state->ctx_dma_len = ctx_len;
  167. state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
  168. if (dma_mapping_error(jrdev, state->ctx_dma)) {
  169. dev_err(jrdev, "unable to map ctx\n");
  170. state->ctx_dma = 0;
  171. return -ENOMEM;
  172. }
  173. dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
  174. return 0;
  175. }
  176. static int ahash_set_sh_desc(struct crypto_ahash *ahash)
  177. {
  178. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  179. int digestsize = crypto_ahash_digestsize(ahash);
  180. struct device *jrdev = ctx->jrdev;
  181. struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
  182. u32 *desc;
  183. ctx->adata.key_virt = ctx->key;
  184. /* ahash_update shared descriptor */
  185. desc = ctx->sh_desc_update;
  186. cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
  187. ctx->ctx_len, true, ctrlpriv->era);
  188. dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
  189. desc_bytes(desc), ctx->dir);
  190. print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
  191. DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
  192. 1);
  193. /* ahash_update_first shared descriptor */
  194. desc = ctx->sh_desc_update_first;
  195. cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
  196. ctx->ctx_len, false, ctrlpriv->era);
  197. dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
  198. desc_bytes(desc), ctx->dir);
  199. print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
  200. ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
  201. desc_bytes(desc), 1);
  202. /* ahash_final shared descriptor */
  203. desc = ctx->sh_desc_fin;
  204. cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
  205. ctx->ctx_len, true, ctrlpriv->era);
  206. dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
  207. desc_bytes(desc), ctx->dir);
  208. print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
  209. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  210. desc_bytes(desc), 1);
  211. /* ahash_digest shared descriptor */
  212. desc = ctx->sh_desc_digest;
  213. cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
  214. ctx->ctx_len, false, ctrlpriv->era);
  215. dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
  216. desc_bytes(desc), ctx->dir);
  217. print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
  218. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  219. desc_bytes(desc), 1);
  220. return 0;
  221. }
  222. static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
  223. {
  224. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  225. int digestsize = crypto_ahash_digestsize(ahash);
  226. struct device *jrdev = ctx->jrdev;
  227. u32 *desc;
  228. /* shared descriptor for ahash_update */
  229. desc = ctx->sh_desc_update;
  230. cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
  231. ctx->ctx_len, ctx->ctx_len);
  232. dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
  233. desc_bytes(desc), ctx->dir);
  234. print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
  235. DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
  236. 1);
  237. /* shared descriptor for ahash_{final,finup} */
  238. desc = ctx->sh_desc_fin;
  239. cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
  240. digestsize, ctx->ctx_len);
  241. dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
  242. desc_bytes(desc), ctx->dir);
  243. print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
  244. DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
  245. 1);
  246. /* key is immediate data for INIT and INITFINAL states */
  247. ctx->adata.key_virt = ctx->key;
  248. /* shared descriptor for first invocation of ahash_update */
  249. desc = ctx->sh_desc_update_first;
  250. cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
  251. ctx->ctx_len);
  252. dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
  253. desc_bytes(desc), ctx->dir);
  254. print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
  255. " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
  256. desc_bytes(desc), 1);
  257. /* shared descriptor for ahash_digest */
  258. desc = ctx->sh_desc_digest;
  259. cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
  260. digestsize, ctx->ctx_len);
  261. dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
  262. desc_bytes(desc), ctx->dir);
  263. print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
  264. DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
  265. 1);
  266. return 0;
  267. }
  268. static int acmac_set_sh_desc(struct crypto_ahash *ahash)
  269. {
  270. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  271. int digestsize = crypto_ahash_digestsize(ahash);
  272. struct device *jrdev = ctx->jrdev;
  273. u32 *desc;
  274. /* shared descriptor for ahash_update */
  275. desc = ctx->sh_desc_update;
  276. cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
  277. ctx->ctx_len, ctx->ctx_len);
  278. dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
  279. desc_bytes(desc), ctx->dir);
  280. print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
  281. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  282. desc_bytes(desc), 1);
  283. /* shared descriptor for ahash_{final,finup} */
  284. desc = ctx->sh_desc_fin;
  285. cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
  286. digestsize, ctx->ctx_len);
  287. dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
  288. desc_bytes(desc), ctx->dir);
  289. print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
  290. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  291. desc_bytes(desc), 1);
  292. /* shared descriptor for first invocation of ahash_update */
  293. desc = ctx->sh_desc_update_first;
  294. cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
  295. ctx->ctx_len);
  296. dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
  297. desc_bytes(desc), ctx->dir);
  298. print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
  299. " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
  300. desc_bytes(desc), 1);
  301. /* shared descriptor for ahash_digest */
  302. desc = ctx->sh_desc_digest;
  303. cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
  304. digestsize, ctx->ctx_len);
  305. dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
  306. desc_bytes(desc), ctx->dir);
  307. print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
  308. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  309. desc_bytes(desc), 1);
  310. return 0;
  311. }
  312. /* Digest the key if it is too large (longer than the block size) */
  313. static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
  314. u32 digestsize)
  315. {
  316. struct device *jrdev = ctx->jrdev;
  317. u32 *desc;
  318. struct split_key_result result;
  319. dma_addr_t key_dma;
  320. int ret;
  321. desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
  322. if (!desc) {
  323. dev_err(jrdev, "unable to allocate key input memory\n");
  324. return -ENOMEM;
  325. }
  326. init_job_desc(desc, 0);
  327. key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
  328. if (dma_mapping_error(jrdev, key_dma)) {
  329. dev_err(jrdev, "unable to map key memory\n");
  330. kfree(desc);
  331. return -ENOMEM;
  332. }
  333. /* Job descriptor to perform unkeyed hash on key_in */
  334. append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
  335. OP_ALG_AS_INITFINAL);
  336. append_seq_in_ptr(desc, key_dma, *keylen, 0);
  337. append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
  338. FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
  339. append_seq_out_ptr(desc, key_dma, digestsize, 0);
  340. append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
  341. LDST_SRCDST_BYTE_CONTEXT);
  342. print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
  343. DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
  344. print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
  345. DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
  346. 1);
  347. result.err = 0;
  348. init_completion(&result.completion);
  349. ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
  350. if (ret == -EINPROGRESS) {
  351. /* in progress */
  352. wait_for_completion(&result.completion);
  353. ret = result.err;
  354. print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
  355. DUMP_PREFIX_ADDRESS, 16, 4, key,
  356. digestsize, 1);
  357. }
  358. dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);
  359. *keylen = digestsize;
  360. kfree(desc);
  361. return ret;
  362. }
  363. static int ahash_setkey(struct crypto_ahash *ahash,
  364. const u8 *key, unsigned int keylen)
  365. {
  366. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  367. struct device *jrdev = ctx->jrdev;
  368. int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
  369. int digestsize = crypto_ahash_digestsize(ahash);
  370. struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
  371. int ret;
  372. u8 *hashed_key = NULL;
  373. dev_dbg(jrdev, "keylen %d\n", keylen);
  374. if (keylen > blocksize) {
  375. hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
  376. if (!hashed_key)
  377. return -ENOMEM;
  378. ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
  379. if (ret)
  380. goto bad_free_key;
  381. key = hashed_key;
  382. }
  383. /*
  384. * If DKP is supported, use it in the shared descriptor to generate
  385. * the split key.
  386. */
  387. if (ctrlpriv->era >= 6) {
  388. ctx->adata.key_inline = true;
  389. ctx->adata.keylen = keylen;
  390. ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
  391. OP_ALG_ALGSEL_MASK);
  392. if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
  393. goto bad_free_key;
  394. memcpy(ctx->key, key, keylen);
  395. /*
  396. * In case |user key| > |derived key|, using DKP<imm,imm>
  397. * would result in invalid opcodes (last bytes of user key) in
  398. * the resulting descriptor. Use DKP<ptr,imm> instead => both
  399. * virtual and dma key addresses are needed.
  400. */
  401. if (keylen > ctx->adata.keylen_pad)
  402. dma_sync_single_for_device(ctx->jrdev,
  403. ctx->adata.key_dma,
  404. ctx->adata.keylen_pad,
  405. DMA_TO_DEVICE);
  406. } else {
  407. ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
  408. keylen, CAAM_MAX_HASH_KEY_SIZE);
  409. if (ret)
  410. goto bad_free_key;
  411. }
  412. kfree(hashed_key);
  413. return ahash_set_sh_desc(ahash);
  414. bad_free_key:
  415. kfree(hashed_key);
  416. return -EINVAL;
  417. }
  418. static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
  419. unsigned int keylen)
  420. {
  421. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  422. struct device *jrdev = ctx->jrdev;
  423. if (keylen != AES_KEYSIZE_128)
  424. return -EINVAL;
  425. memcpy(ctx->key, key, keylen);
  426. dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
  427. DMA_TO_DEVICE);
  428. ctx->adata.keylen = keylen;
  429. print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
  430. DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);
  431. return axcbc_set_sh_desc(ahash);
  432. }
  433. static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
  434. unsigned int keylen)
  435. {
  436. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  437. int err;
  438. err = aes_check_keylen(keylen);
  439. if (err)
  440. return err;
  441. /* key is immediate data for all cmac shared descriptors */
  442. ctx->adata.key_virt = key;
  443. ctx->adata.keylen = keylen;
  444. print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
  445. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  446. return acmac_set_sh_desc(ahash);
  447. }
  448. /*
  449. * ahash_edesc - s/w-extended ahash descriptor
  450. * @sec4_sg_dma: physical mapped address of h/w link table
  451. * @src_nents: number of segments in input scatterlist
  452. * @sec4_sg_bytes: length of dma mapped sec4_sg space
  453. * @bklog: stored to determine if the request needs backlog
  454. * @hw_desc: the h/w job descriptor followed by any referenced link tables
  455. * @sec4_sg: h/w link table
  456. */
  457. struct ahash_edesc {
  458. dma_addr_t sec4_sg_dma;
  459. int src_nents;
  460. int sec4_sg_bytes;
  461. bool bklog;
  462. u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
  463. struct sec4_sg_entry sec4_sg[];
  464. };
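/*
 * A minimal sketch (sg_num and flags are placeholders) of how an edesc with
 * room for sg_num link table entries is sized; ahash_edesc_alloc() below does
 * essentially this:
 *
 *	edesc = kzalloc(sizeof(*edesc) +
 *			sg_num * sizeof(struct sec4_sg_entry),
 *			GFP_DMA | flags);
 */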
  465. static inline void ahash_unmap(struct device *dev,
  466. struct ahash_edesc *edesc,
  467. struct ahash_request *req, int dst_len)
  468. {
  469. struct caam_hash_state *state = ahash_request_ctx(req);
  470. if (edesc->src_nents)
  471. dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
  472. if (edesc->sec4_sg_bytes)
  473. dma_unmap_single(dev, edesc->sec4_sg_dma,
  474. edesc->sec4_sg_bytes, DMA_TO_DEVICE);
  475. if (state->buf_dma) {
  476. dma_unmap_single(dev, state->buf_dma, state->buflen,
  477. DMA_TO_DEVICE);
  478. state->buf_dma = 0;
  479. }
  480. }
  481. static inline void ahash_unmap_ctx(struct device *dev,
  482. struct ahash_edesc *edesc,
  483. struct ahash_request *req, int dst_len, u32 flag)
  484. {
  485. struct caam_hash_state *state = ahash_request_ctx(req);
  486. if (state->ctx_dma) {
  487. dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
  488. state->ctx_dma = 0;
  489. }
  490. ahash_unmap(dev, edesc, req, dst_len);
  491. }
  492. static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
  493. void *context, enum dma_data_direction dir)
  494. {
  495. struct ahash_request *req = context;
  496. struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
  497. struct ahash_edesc *edesc;
  498. struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  499. int digestsize = crypto_ahash_digestsize(ahash);
  500. struct caam_hash_state *state = ahash_request_ctx(req);
  501. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  502. int ecode = 0;
  503. bool has_bklog;
  504. dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  505. edesc = state->edesc;
  506. has_bklog = edesc->bklog;
  507. if (err)
  508. ecode = caam_jr_strstatus(jrdev, err);
  509. ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
  510. memcpy(req->result, state->caam_ctx, digestsize);
  511. kfree(edesc);
  512. print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
  513. DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
  514. ctx->ctx_len, 1);
  515. /*
  516. * If no backlog flag, the completion of the request is done
  517. * by CAAM, not crypto engine.
  518. */
  519. if (!has_bklog)
  520. req->base.complete(&req->base, ecode);
  521. else
  522. crypto_finalize_hash_request(jrp->engine, req, ecode);
  523. }
  524. static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
  525. void *context)
  526. {
  527. ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
  528. }
  529. static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
  530. void *context)
  531. {
  532. ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
  533. }
  534. static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
  535. void *context, enum dma_data_direction dir)
  536. {
  537. struct ahash_request *req = context;
  538. struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
  539. struct ahash_edesc *edesc;
  540. struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  541. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  542. struct caam_hash_state *state = ahash_request_ctx(req);
  543. int digestsize = crypto_ahash_digestsize(ahash);
  544. int ecode = 0;
  545. bool has_bklog;
  546. dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  547. edesc = state->edesc;
  548. has_bklog = edesc->bklog;
  549. if (err)
  550. ecode = caam_jr_strstatus(jrdev, err);
  551. ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
  552. kfree(edesc);
  553. scatterwalk_map_and_copy(state->buf, req->src,
  554. req->nbytes - state->next_buflen,
  555. state->next_buflen, 0);
  556. state->buflen = state->next_buflen;
  557. print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
  558. DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
  559. state->buflen, 1);
  560. print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
  561. DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
  562. ctx->ctx_len, 1);
  563. if (req->result)
  564. print_hex_dump_debug("result@"__stringify(__LINE__)": ",
  565. DUMP_PREFIX_ADDRESS, 16, 4, req->result,
  566. digestsize, 1);
  567. /*
  568. * If no backlog flag, the completion of the request is done
  569. * by CAAM, not crypto engine.
  570. */
  571. if (!has_bklog)
  572. req->base.complete(&req->base, ecode);
  573. else
  574. crypto_finalize_hash_request(jrp->engine, req, ecode);
  575. }
  576. static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
  577. void *context)
  578. {
  579. ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
  580. }
  581. static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
  582. void *context)
  583. {
  584. ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
  585. }
  586. /*
  587. * Allocate an extended descriptor, which contains the hardware descriptor
  588. * and space for a hardware scatter table containing sg_num entries.
  589. */
  590. static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
  591. int sg_num, u32 *sh_desc,
  592. dma_addr_t sh_desc_dma)
  593. {
  594. struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  595. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  596. struct caam_hash_state *state = ahash_request_ctx(req);
  597. gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  598. GFP_KERNEL : GFP_ATOMIC;
  599. struct ahash_edesc *edesc;
  600. unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
  601. edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
  602. if (!edesc) {
  603. dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
  604. return NULL;
  605. }
  606. state->edesc = edesc;
  607. init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
  608. HDR_SHARE_DEFER | HDR_REVERSE);
  609. return edesc;
  610. }
  611. static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
  612. struct ahash_edesc *edesc,
  613. struct ahash_request *req, int nents,
  614. unsigned int first_sg,
  615. unsigned int first_bytes, size_t to_hash)
  616. {
  617. dma_addr_t src_dma;
  618. u32 options;
  619. if (nents > 1 || first_sg) {
  620. struct sec4_sg_entry *sg = edesc->sec4_sg;
  621. unsigned int sgsize = sizeof(*sg) *
  622. pad_sg_nents(first_sg + nents);
  623. sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
  624. src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
  625. if (dma_mapping_error(ctx->jrdev, src_dma)) {
  626. dev_err(ctx->jrdev, "unable to map S/G table\n");
  627. return -ENOMEM;
  628. }
  629. edesc->sec4_sg_bytes = sgsize;
  630. edesc->sec4_sg_dma = src_dma;
  631. options = LDST_SGF;
  632. } else {
  633. src_dma = sg_dma_address(req->src);
  634. options = 0;
  635. }
  636. append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
  637. options);
  638. return 0;
  639. }
  640. static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
  641. {
  642. struct ahash_request *req = ahash_request_cast(areq);
  643. struct caam_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
  644. struct caam_hash_state *state = ahash_request_ctx(req);
  645. struct device *jrdev = ctx->jrdev;
  646. u32 *desc = state->edesc->hw_desc;
  647. int ret;
  648. state->edesc->bklog = true;
  649. ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);
  650. if (ret == -ENOSPC && engine->retry_support)
  651. return ret;
  652. if (ret != -EINPROGRESS) {
  653. ahash_unmap(jrdev, state->edesc, req, 0);
  654. kfree(state->edesc);
  655. } else {
  656. ret = 0;
  657. }
  658. return ret;
  659. }
  660. static int ahash_enqueue_req(struct device *jrdev,
  661. void (*cbk)(struct device *jrdev, u32 *desc,
  662. u32 err, void *context),
  663. struct ahash_request *req,
  664. int dst_len, enum dma_data_direction dir)
  665. {
  666. struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
  667. struct caam_hash_state *state = ahash_request_ctx(req);
  668. struct ahash_edesc *edesc = state->edesc;
  669. u32 *desc = edesc->hw_desc;
  670. int ret;
  671. state->ahash_op_done = cbk;
  672. /*
  673. * Only backlog requests are sent to the crypto engine, since the others
  674. * can be handled directly by CAAM if it is free, especially as the JR has
  675. * up to 1024 entries (more than the 10 entries of the crypto engine).
  676. */
  677. if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
  678. ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
  679. req);
  680. else
  681. ret = caam_jr_enqueue(jrdev, desc, cbk, req);
  682. if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
  683. ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
  684. kfree(edesc);
  685. }
  686. return ret;
  687. }
  688. /* submit update job descriptor */
  689. static int ahash_update_ctx(struct ahash_request *req)
  690. {
  691. struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  692. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  693. struct caam_hash_state *state = ahash_request_ctx(req);
  694. struct device *jrdev = ctx->jrdev;
  695. u8 *buf = state->buf;
  696. int *buflen = &state->buflen;
  697. int *next_buflen = &state->next_buflen;
  698. int blocksize = crypto_ahash_blocksize(ahash);
  699. int in_len = *buflen + req->nbytes, to_hash;
  700. u32 *desc;
  701. int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
  702. struct ahash_edesc *edesc;
  703. int ret = 0;
  704. *next_buflen = in_len & (blocksize - 1);
  705. to_hash = in_len - *next_buflen;
  706. /*
  707. * For XCBC and CMAC, if to_hash is a multiple of the block size,
  708. * keep the last block in the internal buffer
  709. */
  710. if ((is_xcbc_aes(ctx->adata.algtype) ||
  711. is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
  712. (*next_buflen == 0)) {
  713. *next_buflen = blocksize;
  714. to_hash -= blocksize;
  715. }
  716. if (to_hash) {
  717. int pad_nents;
  718. int src_len = req->nbytes - *next_buflen;
  719. src_nents = sg_nents_for_len(req->src, src_len);
  720. if (src_nents < 0) {
  721. dev_err(jrdev, "Invalid number of src SG.\n");
  722. return src_nents;
  723. }
  724. if (src_nents) {
  725. mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
  726. DMA_TO_DEVICE);
  727. if (!mapped_nents) {
  728. dev_err(jrdev, "unable to DMA map source\n");
  729. return -ENOMEM;
  730. }
  731. } else {
  732. mapped_nents = 0;
  733. }
  734. sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
  735. pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
  736. sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
  737. /*
  738. * allocate space for base edesc and hw desc commands,
  739. * link tables
  740. */
  741. edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
  742. ctx->sh_desc_update_dma);
  743. if (!edesc) {
  744. dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
  745. return -ENOMEM;
  746. }
  747. edesc->src_nents = src_nents;
  748. edesc->sec4_sg_bytes = sec4_sg_bytes;
  749. ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
  750. edesc->sec4_sg, DMA_BIDIRECTIONAL);
  751. if (ret)
  752. goto unmap_ctx;
  753. ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
  754. if (ret)
  755. goto unmap_ctx;
  756. if (mapped_nents)
  757. sg_to_sec4_sg_last(req->src, src_len,
  758. edesc->sec4_sg + sec4_sg_src_index,
  759. 0);
  760. else
  761. sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
  762. 1);
  763. desc = edesc->hw_desc;
  764. edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
  765. sec4_sg_bytes,
  766. DMA_TO_DEVICE);
  767. if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
  768. dev_err(jrdev, "unable to map S/G table\n");
  769. ret = -ENOMEM;
  770. goto unmap_ctx;
  771. }
  772. append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
  773. to_hash, LDST_SGF);
  774. append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
  775. print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
  776. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  777. desc_bytes(desc), 1);
  778. ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
  779. ctx->ctx_len, DMA_BIDIRECTIONAL);
  780. } else if (*next_buflen) {
  781. scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
  782. req->nbytes, 0);
  783. *buflen = *next_buflen;
  784. print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
  785. DUMP_PREFIX_ADDRESS, 16, 4, buf,
  786. *buflen, 1);
  787. }
  788. return ret;
  789. unmap_ctx:
  790. ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
  791. kfree(edesc);
  792. return ret;
  793. }
  794. static int ahash_final_ctx(struct ahash_request *req)
  795. {
  796. struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  797. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  798. struct caam_hash_state *state = ahash_request_ctx(req);
  799. struct device *jrdev = ctx->jrdev;
  800. int buflen = state->buflen;
  801. u32 *desc;
  802. int sec4_sg_bytes;
  803. int digestsize = crypto_ahash_digestsize(ahash);
  804. struct ahash_edesc *edesc;
  805. int ret;
  806. sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
  807. sizeof(struct sec4_sg_entry);
  808. /* allocate space for base edesc and hw desc commands, link tables */
  809. edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
  810. ctx->sh_desc_fin_dma);
  811. if (!edesc)
  812. return -ENOMEM;
  813. desc = edesc->hw_desc;
  814. edesc->sec4_sg_bytes = sec4_sg_bytes;
  815. ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
  816. edesc->sec4_sg, DMA_BIDIRECTIONAL);
  817. if (ret)
  818. goto unmap_ctx;
  819. ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
  820. if (ret)
  821. goto unmap_ctx;
  822. sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));
  823. edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
  824. sec4_sg_bytes, DMA_TO_DEVICE);
  825. if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
  826. dev_err(jrdev, "unable to map S/G table\n");
  827. ret = -ENOMEM;
  828. goto unmap_ctx;
  829. }
  830. append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
  831. LDST_SGF);
  832. append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
  833. print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
  834. DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
  835. 1);
  836. return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
  837. digestsize, DMA_BIDIRECTIONAL);
  838. unmap_ctx:
  839. ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
  840. kfree(edesc);
  841. return ret;
  842. }
  843. static int ahash_finup_ctx(struct ahash_request *req)
  844. {
  845. struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  846. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  847. struct caam_hash_state *state = ahash_request_ctx(req);
  848. struct device *jrdev = ctx->jrdev;
  849. int buflen = state->buflen;
  850. u32 *desc;
  851. int sec4_sg_src_index;
  852. int src_nents, mapped_nents;
  853. int digestsize = crypto_ahash_digestsize(ahash);
  854. struct ahash_edesc *edesc;
  855. int ret;
  856. src_nents = sg_nents_for_len(req->src, req->nbytes);
  857. if (src_nents < 0) {
  858. dev_err(jrdev, "Invalid number of src SG.\n");
  859. return src_nents;
  860. }
  861. if (src_nents) {
  862. mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
  863. DMA_TO_DEVICE);
  864. if (!mapped_nents) {
  865. dev_err(jrdev, "unable to DMA map source\n");
  866. return -ENOMEM;
  867. }
  868. } else {
  869. mapped_nents = 0;
  870. }
  871. sec4_sg_src_index = 1 + (buflen ? 1 : 0);
  872. /* allocate space for base edesc and hw desc commands, link tables */
  873. edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
  874. ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
  875. if (!edesc) {
  876. dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
  877. return -ENOMEM;
  878. }
  879. desc = edesc->hw_desc;
  880. edesc->src_nents = src_nents;
  881. ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
  882. edesc->sec4_sg, DMA_BIDIRECTIONAL);
  883. if (ret)
  884. goto unmap_ctx;
  885. ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
  886. if (ret)
  887. goto unmap_ctx;
  888. ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
  889. sec4_sg_src_index, ctx->ctx_len + buflen,
  890. req->nbytes);
  891. if (ret)
  892. goto unmap_ctx;
  893. append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
  894. print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
  895. DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
  896. 1);
  897. return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
  898. digestsize, DMA_BIDIRECTIONAL);
  899. unmap_ctx:
  900. ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
  901. kfree(edesc);
  902. return ret;
  903. }
  904. static int ahash_digest(struct ahash_request *req)
  905. {
  906. struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  907. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  908. struct caam_hash_state *state = ahash_request_ctx(req);
  909. struct device *jrdev = ctx->jrdev;
  910. u32 *desc;
  911. int digestsize = crypto_ahash_digestsize(ahash);
  912. int src_nents, mapped_nents;
  913. struct ahash_edesc *edesc;
  914. int ret;
  915. state->buf_dma = 0;
  916. src_nents = sg_nents_for_len(req->src, req->nbytes);
  917. if (src_nents < 0) {
  918. dev_err(jrdev, "Invalid number of src SG.\n");
  919. return src_nents;
  920. }
  921. if (src_nents) {
  922. mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
  923. DMA_TO_DEVICE);
  924. if (!mapped_nents) {
  925. dev_err(jrdev, "unable to map source for DMA\n");
  926. return -ENOMEM;
  927. }
  928. } else {
  929. mapped_nents = 0;
  930. }
  931. /* allocate space for base edesc and hw desc commands, link tables */
  932. edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
  933. ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
  934. if (!edesc) {
  935. dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
  936. return -ENOMEM;
  937. }
  938. edesc->src_nents = src_nents;
  939. ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
  940. req->nbytes);
  941. if (ret) {
  942. ahash_unmap(jrdev, edesc, req, digestsize);
  943. kfree(edesc);
  944. return ret;
  945. }
  946. desc = edesc->hw_desc;
  947. ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
  948. if (ret) {
  949. ahash_unmap(jrdev, edesc, req, digestsize);
  950. kfree(edesc);
  951. return -ENOMEM;
  952. }
  953. print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
  954. DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
  955. 1);
  956. return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
  957. DMA_FROM_DEVICE);
  958. }
  959. /* submit ahash final if it is the first job descriptor */
  960. static int ahash_final_no_ctx(struct ahash_request *req)
  961. {
  962. struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  963. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  964. struct caam_hash_state *state = ahash_request_ctx(req);
  965. struct device *jrdev = ctx->jrdev;
  966. u8 *buf = state->buf;
  967. int buflen = state->buflen;
  968. u32 *desc;
  969. int digestsize = crypto_ahash_digestsize(ahash);
  970. struct ahash_edesc *edesc;
  971. int ret;
  972. /* allocate space for base edesc and hw desc commands, link tables */
  973. edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
  974. ctx->sh_desc_digest_dma);
  975. if (!edesc)
  976. return -ENOMEM;
  977. desc = edesc->hw_desc;
  978. if (buflen) {
  979. state->buf_dma = dma_map_single(jrdev, buf, buflen,
  980. DMA_TO_DEVICE);
  981. if (dma_mapping_error(jrdev, state->buf_dma)) {
  982. dev_err(jrdev, "unable to map src\n");
  983. goto unmap;
  984. }
  985. append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
  986. }
  987. ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
  988. if (ret)
  989. goto unmap;
  990. print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
  991. DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
  992. 1);
  993. return ahash_enqueue_req(jrdev, ahash_done, req,
  994. digestsize, DMA_FROM_DEVICE);
  995. unmap:
  996. ahash_unmap(jrdev, edesc, req, digestsize);
  997. kfree(edesc);
  998. return -ENOMEM;
  999. }
  1000. /* submit ahash update if it is the first job descriptor after update */
  1001. static int ahash_update_no_ctx(struct ahash_request *req)
  1002. {
  1003. struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  1004. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  1005. struct caam_hash_state *state = ahash_request_ctx(req);
  1006. struct device *jrdev = ctx->jrdev;
  1007. u8 *buf = state->buf;
  1008. int *buflen = &state->buflen;
  1009. int *next_buflen = &state->next_buflen;
  1010. int blocksize = crypto_ahash_blocksize(ahash);
  1011. int in_len = *buflen + req->nbytes, to_hash;
  1012. int sec4_sg_bytes, src_nents, mapped_nents;
  1013. struct ahash_edesc *edesc;
  1014. u32 *desc;
  1015. int ret = 0;
  1016. *next_buflen = in_len & (blocksize - 1);
  1017. to_hash = in_len - *next_buflen;
  1018. /*
  1019. * For XCBC and CMAC, if to_hash is a multiple of the block size,
  1020. * keep the last block in the internal buffer
  1021. */
  1022. if ((is_xcbc_aes(ctx->adata.algtype) ||
  1023. is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
  1024. (*next_buflen == 0)) {
  1025. *next_buflen = blocksize;
  1026. to_hash -= blocksize;
  1027. }
  1028. if (to_hash) {
  1029. int pad_nents;
  1030. int src_len = req->nbytes - *next_buflen;
  1031. src_nents = sg_nents_for_len(req->src, src_len);
  1032. if (src_nents < 0) {
  1033. dev_err(jrdev, "Invalid number of src SG.\n");
  1034. return src_nents;
  1035. }
  1036. if (src_nents) {
  1037. mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
  1038. DMA_TO_DEVICE);
  1039. if (!mapped_nents) {
  1040. dev_err(jrdev, "unable to DMA map source\n");
  1041. return -ENOMEM;
  1042. }
  1043. } else {
  1044. mapped_nents = 0;
  1045. }
  1046. pad_nents = pad_sg_nents(1 + mapped_nents);
  1047. sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
  1048. /*
  1049. * allocate space for base edesc and hw desc commands,
  1050. * link tables
  1051. */
  1052. edesc = ahash_edesc_alloc(req, pad_nents,
  1053. ctx->sh_desc_update_first,
  1054. ctx->sh_desc_update_first_dma);
  1055. if (!edesc) {
  1056. dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
  1057. return -ENOMEM;
  1058. }
  1059. edesc->src_nents = src_nents;
  1060. edesc->sec4_sg_bytes = sec4_sg_bytes;
  1061. ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
  1062. if (ret)
  1063. goto unmap_ctx;
  1064. sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
  1065. desc = edesc->hw_desc;
  1066. edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
  1067. sec4_sg_bytes,
  1068. DMA_TO_DEVICE);
  1069. if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
  1070. dev_err(jrdev, "unable to map S/G table\n");
  1071. ret = -ENOMEM;
  1072. goto unmap_ctx;
  1073. }
  1074. append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
  1075. ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
  1076. if (ret)
  1077. goto unmap_ctx;
  1078. print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
  1079. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  1080. desc_bytes(desc), 1);
  1081. ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
  1082. ctx->ctx_len, DMA_TO_DEVICE);
  1083. if ((ret != -EINPROGRESS) && (ret != -EBUSY))
  1084. return ret;
  1085. state->update = ahash_update_ctx;
  1086. state->finup = ahash_finup_ctx;
  1087. state->final = ahash_final_ctx;
  1088. } else if (*next_buflen) {
  1089. scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
  1090. req->nbytes, 0);
  1091. *buflen = *next_buflen;
  1092. print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
  1093. DUMP_PREFIX_ADDRESS, 16, 4, buf,
  1094. *buflen, 1);
  1095. }
  1096. return ret;
  1097. unmap_ctx:
  1098. ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
  1099. kfree(edesc);
  1100. return ret;
  1101. }
  1102. /* submit ahash finup if it is the first job descriptor after update */
  1103. static int ahash_finup_no_ctx(struct ahash_request *req)
  1104. {
  1105. struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  1106. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  1107. struct caam_hash_state *state = ahash_request_ctx(req);
  1108. struct device *jrdev = ctx->jrdev;
  1109. int buflen = state->buflen;
  1110. u32 *desc;
  1111. int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
  1112. int digestsize = crypto_ahash_digestsize(ahash);
  1113. struct ahash_edesc *edesc;
  1114. int ret;
  1115. src_nents = sg_nents_for_len(req->src, req->nbytes);
  1116. if (src_nents < 0) {
  1117. dev_err(jrdev, "Invalid number of src SG.\n");
  1118. return src_nents;
  1119. }
  1120. if (src_nents) {
  1121. mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
  1122. DMA_TO_DEVICE);
  1123. if (!mapped_nents) {
  1124. dev_err(jrdev, "unable to DMA map source\n");
  1125. return -ENOMEM;
  1126. }
  1127. } else {
  1128. mapped_nents = 0;
  1129. }
  1130. sec4_sg_src_index = 2;
  1131. sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
  1132. sizeof(struct sec4_sg_entry);
  1133. /* allocate space for base edesc and hw desc commands, link tables */
  1134. edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
  1135. ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
  1136. if (!edesc) {
  1137. dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
  1138. return -ENOMEM;
  1139. }
  1140. desc = edesc->hw_desc;
  1141. edesc->src_nents = src_nents;
  1142. edesc->sec4_sg_bytes = sec4_sg_bytes;
  1143. ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
  1144. if (ret)
  1145. goto unmap;
  1146. ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
  1147. req->nbytes);
  1148. if (ret) {
  1149. dev_err(jrdev, "unable to map S/G table\n");
  1150. goto unmap;
  1151. }
  1152. ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
  1153. if (ret)
  1154. goto unmap;
  1155. print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
  1156. DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
  1157. 1);
  1158. return ahash_enqueue_req(jrdev, ahash_done, req,
  1159. digestsize, DMA_FROM_DEVICE);
  1160. unmap:
  1161. ahash_unmap(jrdev, edesc, req, digestsize);
  1162. kfree(edesc);
  1163. return -ENOMEM;
  1164. }
  1165. /* submit first update job descriptor after init */
  1166. static int ahash_update_first(struct ahash_request *req)
  1167. {
  1168. struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  1169. struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  1170. struct caam_hash_state *state = ahash_request_ctx(req);
  1171. struct device *jrdev = ctx->jrdev;
  1172. u8 *buf = state->buf;
  1173. int *buflen = &state->buflen;
  1174. int *next_buflen = &state->next_buflen;
  1175. int to_hash;
  1176. int blocksize = crypto_ahash_blocksize(ahash);
  1177. u32 *desc;
  1178. int src_nents, mapped_nents;
  1179. struct ahash_edesc *edesc;
  1180. int ret = 0;
  1181. *next_buflen = req->nbytes & (blocksize - 1);
  1182. to_hash = req->nbytes - *next_buflen;
  1183. /*
  1184. * For XCBC and CMAC, if to_hash is a multiple of the block size,
  1185. * keep the last block in the internal buffer
  1186. */
  1187. if ((is_xcbc_aes(ctx->adata.algtype) ||
  1188. is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
  1189. (*next_buflen == 0)) {
  1190. *next_buflen = blocksize;
  1191. to_hash -= blocksize;
  1192. }
  1193. if (to_hash) {
  1194. src_nents = sg_nents_for_len(req->src,
  1195. req->nbytes - *next_buflen);
  1196. if (src_nents < 0) {
  1197. dev_err(jrdev, "Invalid number of src SG.\n");
  1198. return src_nents;
  1199. }
  1200. if (src_nents) {
  1201. mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
  1202. DMA_TO_DEVICE);
  1203. if (!mapped_nents) {
  1204. dev_err(jrdev, "unable to map source for DMA\n");
  1205. return -ENOMEM;
  1206. }
  1207. } else {
  1208. mapped_nents = 0;
  1209. }
  1210. /*
  1211. * allocate space for base edesc and hw desc commands,
  1212. * link tables
  1213. */
  1214. edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
  1215. mapped_nents : 0,
  1216. ctx->sh_desc_update_first,
  1217. ctx->sh_desc_update_first_dma);
  1218. if (!edesc) {
  1219. dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
  1220. return -ENOMEM;
  1221. }
  1222. edesc->src_nents = src_nents;
  1223. ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
  1224. to_hash);
  1225. if (ret)
  1226. goto unmap_ctx;
  1227. desc = edesc->hw_desc;
  1228. ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
  1229. if (ret)
  1230. goto unmap_ctx;
  1231. print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
  1232. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  1233. desc_bytes(desc), 1);
  1234. ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
  1235. ctx->ctx_len, DMA_TO_DEVICE);
  1236. if ((ret != -EINPROGRESS) && (ret != -EBUSY))
  1237. return ret;
  1238. state->update = ahash_update_ctx;
  1239. state->finup = ahash_finup_ctx;
  1240. state->final = ahash_final_ctx;
  1241. } else if (*next_buflen) {
  1242. state->update = ahash_update_no_ctx;
  1243. state->finup = ahash_finup_no_ctx;
  1244. state->final = ahash_final_no_ctx;
  1245. scatterwalk_map_and_copy(buf, req->src, 0,
  1246. req->nbytes, 0);
  1247. *buflen = *next_buflen;
  1248. print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
  1249. DUMP_PREFIX_ADDRESS, 16, 4, buf,
  1250. *buflen, 1);
  1251. }
  1252. return ret;
  1253. unmap_ctx:
  1254. ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
  1255. kfree(edesc);
  1256. return ret;
  1257. }
  1258. static int ahash_finup_first(struct ahash_request *req)
  1259. {
  1260. return ahash_digest(req);
  1261. }

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
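
/*
 * ahash_export()/ahash_import() let the crypto API snapshot and resume a
 * partially hashed request: only the software buffer, the CAAM running
 * context and the current stage callbacks are (de)serialized, which is why
 * the templates below advertise .statesize = sizeof(struct caam_export_state).
 *
 * A minimal sketch of how a generic caller would exercise this (variable
 * names are illustrative only; real code should size the buffer from
 * crypto_ahash_statesize() and check return values):
 *
 *	char part[sizeof(struct caam_export_state)];
 *
 *	crypto_ahash_export(req, part);		// after some updates
 *	...
 *	crypto_ahash_import(req2, part);	// resume on another request
 *	crypto_ahash_final(req2);
 */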

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}, {
		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
	}, {
		.hmac_name = "cmac(aes)",
		.hmac_driver_name = "cmac-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = acmac_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
	},
};
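
/*
 * Note: the xcbc(aes) and cmac(aes) entries above provide only the keyed
 * (.hmac_name) form; caam_algapi_hash_init() below deliberately skips
 * registering an unkeyed variant for the AES-based MACs.
 */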

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};
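
/*
 * Per-tfm init: allocate a job ring so requests on this tfm are processed
 * in order, pick DMA directions and running-context length per algorithm
 * class (AES XCBC/CMAC vs. MDHA hashes), DMA-map the key storage when
 * needed and the block of shared descriptors, and defer shared-descriptor
 * generation to setkey() for keyed algorithms.
 */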

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
						      sh_desc_update);
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_BIDIRECTIONAL;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 48;
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_NONE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 32;
	} else {
		if (priv->era >= 6) {
			ctx->dir = DMA_BIDIRECTIONAL;
			ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
		} else {
			ctx->dir = DMA_TO_DEVICE;
			ctx->key_dir = DMA_NONE;
		}
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					  OP_ALG_ALGSEL_SHIFT];
	}

	if (ctx->key_dir != DMA_NONE) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  ctx->key_dir,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	}

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key) -
					sh_desc_update_offset,
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (ctx->key_dir != DMA_NONE)
			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       ctx->key_dir,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first) -
					sh_desc_update_offset;
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin) -
					  sh_desc_update_offset;
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest) -
					     sh_desc_update_offset;

	ctx->enginectx.op.do_one_request = ahash_do_one_req;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}
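
/*
 * Per-tfm teardown: undo the DMA mappings set up in caam_hash_cra_init()
 * and release the job ring.
 */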

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key) -
			       offsetof(struct caam_hash_ctx, sh_desc_update),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->key_dir != DMA_NONE)
		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), ctx->key_dir,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

void caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
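
/*
 * Build a caam_hash_alg from a template, either as the keyed (hmac/MAC)
 * variant or as the plain digest; the unkeyed variant drops the setkey
 * callback so the crypto API treats it as an unkeyed hash.
 */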

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}
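
/*
 * Module-level registration: probe the MDHA block (its instantiation and
 * version decide whether anything is registered and whether digests larger
 * than SHA-256 are supported), then register the keyed and, where
 * applicable, the unkeyed variant of each template in driver_hash[].
 */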

int caam_algapi_hash_init(struct device *ctrldev)
{
	int i = 0, err = 0;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst)
		return 0;

	/* Limit digest size based on LP256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}