cc_buffer_mgr.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "cc_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "cc_cipher.h"
#include "cc_hash.h"
#include "cc_aead.h"

union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};

struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};

static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
{
	switch (type) {
	case CC_DMA_BUF_NULL:
		return "BUF_NULL";
	case CC_DMA_BUF_DLLI:
		return "BUF_DLLI";
	case CC_DMA_BUF_MLLI:
		return "BUF_MLLI";
	default:
		return "BUF_INVALID";
	}
}

/**
 * cc_copy_mac() - Copy MAC to temporary location
 *
 * @dev: device object
 * @req: aead request object
 * @dir: [IN] copy from/to sgl
 */
static void cc_copy_mac(struct device *dev, struct aead_request *req,
			enum cc_sg_cpy_direct dir)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 skip = req->assoclen + req->cryptlen;

	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
			   (skip - areq_ctx->req_authsize), skip, dir);
}

/**
 * cc_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @dev: Device object
 * @sg_list: SG list
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 *
 * Return:
 * Number of entries in the scatterlist
 */
static unsigned int cc_get_sgl_nents(struct device *dev,
				     struct scatterlist *sg_list,
				     unsigned int nbytes, u32 *lbytes)
{
	unsigned int nents = 0;

	*lbytes = 0;

	while (nbytes && sg_list) {
		nents++;
		/* get the number of bytes in the last entry */
		*lbytes = nbytes;
		nbytes -= (sg_list->length > nbytes) ?
				nbytes : sg_list->length;
		sg_list = sg_next(sg_list);
	}

	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}

/**
 * cc_copy_sg_portion() - Copy scatter list data,
 * from to_skip to end, to dest and vice versa
 *
 * @dev: Device object
 * @dest: Buffer to copy to/from
 * @sg: SG list
 * @to_skip: Number of bytes to skip before copying
 * @end: Offset of last byte to copy
 * @direct: Transfer direction (true == from SG list to buffer, false == from
 *	    buffer to SG list)
 */
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
	u32 nents;

	nents = sg_nents_for_len(sg, end);
	sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
		       (direct == CC_SG_TO_BUF));
}

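/*
 * cc_render_buff_to_mlli() - Render one DMA buffer into MLLI entries,
 * splitting buffers larger than CC_MAX_MLLI_ENTRY_SIZE across several
 * entries. Advances *mlli_entry_pp past the last entry written and bumps
 * *curr_nents; returns -ENOMEM if the table would exceed
 * MAX_NUM_OF_TOTAL_MLLI_ENTRIES.
 */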
static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
				  u32 buff_size, u32 *curr_nents,
				  u32 **mlli_entry_pp)
{
	u32 *mlli_entry_p = *mlli_entry_pp;
	u32 new_nents;

	/* Verify there is no memory overflow */
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
		dev_err(dev, "Too many mlli entries. current %d max %d\n",
			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
		return -ENOMEM;
	}

	/* handle buffer longer than 64 kbytes */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		cc_lli_set_addr(mlli_entry_p, buff_dma);
		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
			mlli_entry_p[LLI_WORD1_OFFSET]);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}

	/* Last entry */
	cc_lli_set_addr(mlli_entry_p, buff_dma);
	cc_lli_set_size(mlli_entry_p, buff_size);
	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
		mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;

	return 0;
}

static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
				u32 sgl_data_len, u32 sgl_offset,
				u32 *curr_nents, u32 **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	u32 *mlli_entry_p = *mlli_entry_pp;
	s32 rc = 0;

	for ( ; (curr_sgl && sgl_data_len);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
				sg_dma_len(curr_sgl) - sgl_offset :
				sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
					    sgl_offset, entry_data_len,
					    curr_nents, &mlli_entry_p);
		if (rc)
			return rc;

		sgl_offset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}

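/*
 * cc_generate_mlli() - Allocate one MLLI table from the current DMA pool and
 * render every buffer collected in @sg_data into it. Each buffer's
 * mlli_nents counter (when provided) is updated with the number of entries
 * that buffer contributed, and mlli_params->mlli_len is set to the total
 * table size in bytes.
 */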
static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
			    struct mlli_params *mlli_params, gfp_t flags)
{
	u32 *mlli_p;
	u32 total_nents = 0, prev_total_nents = 0;
	int rc = 0, i;

	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr =
		dma_pool_alloc(mlli_params->curr_pool, flags,
			       &mlli_params->mlli_dma_addr);
	if (!mlli_params->mlli_virt_addr) {
		dev_err(dev, "dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}

	/* Point to start of MLLI */
	mlli_p = mlli_params->mlli_virt_addr;

	/* go over all SG's and link it to one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		union buffer_array_entry *entry = &sg_data->entry[i];
		u32 tot_len = sg_data->total_data_len[i];
		u32 offset = sg_data->offset[i];

		rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
					  &total_nents, &mlli_p);
		if (rc)
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i]) {
			/* Calculate the current MLLI table length for the
			 * length field in the descriptor
			 */
			*sg_data->mlli_nents[i] +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Set MLLI size for the bypass operation */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
		mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}

static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
			    unsigned int nents, struct scatterlist *sgl,
			    unsigned int data_len, unsigned int data_offset,
			    bool is_last_table, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}

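/*
 * cc_map_sg() - DMA-map an SG list carrying @nbytes of data. On success
 * *nents holds the number of entries spanning @nbytes and *mapped_nents the
 * number of entries actually mapped; fails with -ENOMEM if the list exceeds
 * @max_sg_nents or dma_map_sg() fails.
 */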
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
		     unsigned int nbytes, int direction, u32 *nents,
		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
	int ret = 0;

	if (!nbytes) {
		*mapped_nents = 0;
		*lbytes = 0;
		*nents = 0;
		return 0;
	}

	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
	if (*nents > max_sg_nents) {
		*nents = 0;
		dev_err(dev, "Too many fragments. current %d max %d\n",
			*nents, max_sg_nents);
		return -ENOMEM;
	}

	ret = dma_map_sg(dev, sg, *nents, direction);
	if (!ret) {
		*nents = 0;
		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
		return -ENOMEM;
	}

	*mapped_nents = ret;

	return 0;
}

static int
cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
		     u8 *config_data, struct buffer_array *sg_data,
		     unsigned int assoclen)
{
	dev_dbg(dev, " handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(&areq_ctx->ccm_adata_sg),
		sg_page(&areq_ctx->ccm_adata_sg),
		sg_virt(&areq_ctx->ccm_adata_sg),
		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
	/* prepare for case of MLLI */
	if (assoclen > 0) {
		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
				0, false, NULL);
	}
	return 0;
}

static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
			   u8 *curr_buff, u32 curr_buff_cnt,
			   struct buffer_array *sg_data)
{
	dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
		areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for case of MLLI */
	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
			false, NULL);
	return 0;
}

void cc_unmap_cipher_request(struct device *dev, void *ctx,
			     unsigned int ivsize, struct scatterlist *src,
			     struct scatterlist *dst)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;

	if (req_ctx->gen_ctx.iv_dma_addr) {
		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize, DMA_BIDIRECTIONAL);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
	    req_ctx->mlli_params.mlli_virt_addr) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	if (src != dst) {
		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
	} else {
		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
	}
}

int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
			  unsigned int ivsize, unsigned int nbytes,
			  void *info, struct scatterlist *src,
			  struct scatterlist *dst, gfp_t flags)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	u32 dummy = 0;
	int rc = 0;
	u32 mapped_nents = 0;
	int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (ivsize) {
		dump_byte_array("iv", info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
				ivsize, info);
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
	if (rc)
		goto cipher_exit;
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

	if (src == dst) {
		/* Handle inplace operation */
		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			req_ctx->out_nents = 0;
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst sg */
		rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto cipher_exit;
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
					nbytes, 0, true,
					&req_ctx->out_mlli_nents);
		}
	}

	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto cipher_exit;
	}

	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
		cc_dma_buf_type(req_ctx->dma_buf_type));

	return 0;

cipher_exit:
	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}

void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
	int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	if (areq_ctx->mac_buf_dma_addr) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		if (areq_ctx->hkey_dma_addr) {
			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		}

		if (areq_ctx->gcm_block_len_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc1_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc2_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (areq_ctx->ccm_iv0_dma_addr) {
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}
	if (areq_ctx->gen_ctx.iv_dma_addr) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
		kfree_sensitive(areq_ctx->gen_ctx.iv);
	}

	/* Release pool */
	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
	    (areq_ctx->mlli_params.mlli_virt_addr)) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
		areq_ctx->assoclen, req->cryptlen);

	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
	if (req->src != req->dst) {
		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
			sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
	}
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst) {
		/* copy back the mac from its temporary location to deal with
		 * a possible data memory overwrite caused by a cache
		 * coherency problem.
		 */
		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
	}
}

static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
			   u32 last_entry_data_size)
{
	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
}

static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
			    struct aead_request *req,
			    struct buffer_array *sg_data,
			    bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = drvdata_to_dev(drvdata);
	gfp_t flags = cc_gfp_flags(&req->base);
	int rc = 0;

	if (!req->iv) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		areq_ctx->gen_ctx.iv = NULL;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
	if (!areq_ctx->gen_ctx.iv)
		return -ENOMEM;

	areq_ctx->gen_ctx.iv_dma_addr =
		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
			hw_iv_size, req->iv);
		kfree_sensitive(areq_ctx->gen_ctx.iv);
		areq_ctx->gen_ctx.iv = NULL;
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);

chain_iv_exit:
	return rc;
}

static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
			       struct aead_request *req,
			       struct buffer_array *sg_data,
			       bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = 0;
	int mapped_nents = 0;
	struct device *dev = drvdata_to_dev(drvdata);

	if (!sg_data) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}

	if (areq_ctx->assoclen == 0) {
		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		goto chain_assoc_exit;
	}

	mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
	if (mapped_nents < 0)
		return mapped_nents;

	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->assoc.nents = mapped_nents;

	/* in CCM case we have additional entry for
	 * ccm header configurations
	 */
	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
			dev_err(dev, "CCM case: too many fragments. Current %d max %d\n",
				(areq_ctx->assoc.nents + 1),
				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
			goto chain_assoc_exit;
		}
	}

	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
	else
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;

	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
				areq_ctx->assoclen, 0, is_last,
				&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_assoc_exit:
	return rc;
}

static void cc_prepare_aead_data_dlli(struct aead_request *req,
				      u32 *src_last_bytes, u32 *dst_last_bytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct scatterlist *sg;
	ssize_t offset;

	areq_ctx->is_icv_fragmented = false;

	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		sg = areq_ctx->src_sgl;
		offset = *src_last_bytes - authsize;
	} else {
		sg = areq_ctx->dst_sgl;
		offset = *dst_last_bytes - authsize;
	}

	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
}

static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				      struct aead_request *req,
				      struct buffer_array *sg_data,
				      u32 *src_last_bytes, u32 *dst_last_bytes,
				      bool is_last_table)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct device *dev = drvdata_to_dev(drvdata);
	struct scatterlist *sg;

	if (req->src == req->dst) {
		/* INPLACE */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);

		if (areq_ctx->is_icv_fragmented) {
			/* Backup happens only when ICV is fragmented; ICV
			 * verification is made by CPU compare in order to
			 * simplify MAC verification upon request completion.
			 */
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				/* On coherent platforms (e.g. ACP) the ICV
				 * has already been copied for any
				 * INPLACE-DECRYPT operation, so skip the
				 * copy here.
				 */
				if (!drvdata->coherent)
					cc_copy_mac(dev, req, CC_SG_TO_BUF);

				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
			} else {
				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
				areq_ctx->icv_dma_addr =
					areq_ctx->mac_buf_dma_addr;
			}
		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle if the sg is not contig. */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/* NON-INPLACE and DECRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);
		/* Backup happens only when ICV is fragmented; ICV
		 * verification is made by CPU compare in order to simplify
		 * MAC verification upon request completion.
		 */
		if (areq_ctx->is_icv_fragmented) {
			cc_copy_mac(dev, req, CC_SG_TO_BUF);
			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;

		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle if the sg is not contig. */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else {
		/* NON-INPLACE and ENCRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
				       *dst_last_bytes);

		if (!areq_ctx->is_icv_fragmented) {
			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
			/* Contig. ICV */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*dst_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*dst_last_bytes - authsize);
		} else {
			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
		}
	}
}

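/*
 * cc_aead_chain_data() - Locate the cipher data inside the already-mapped
 * src SGL (and map dst when not operating in-place) by skipping the assoc
 * data, then set up DLLI or MLLI descriptors for it and record the ICV
 * location via the prepare helpers above.
 */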
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
			      struct aead_request *req,
			      struct buffer_array *sg_data,
			      bool is_last_table, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(drvdata);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	/* non-inplace mode */
	unsigned int size_for_map = req->assoclen + req->cryptlen;
	u32 sg_index = 0;
	u32 size_to_skip = req->assoclen;
	struct scatterlist *sgl;

	offset = size_to_skip;

	if (!sg_data)
		return -EINVAL;

	areq_ctx->src_sgl = req->src;
	areq_ctx->dst_sgl = req->dst;

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			authsize : 0;

	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
					    &src_last_bytes);
	sg_index = areq_ctx->src_sgl->length;
	/* check where the data starts */
	while (src_mapped_nents && (sg_index <= size_to_skip)) {
		src_mapped_nents--;
		offset -= areq_ctx->src_sgl->length;
		sgl = sg_next(areq_ctx->src_sgl);
		if (!sgl)
			break;
		areq_ctx->src_sgl = sgl;
		sg_index += areq_ctx->src_sgl->length;
	}
	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}

	areq_ctx->src.nents = src_mapped_nents;
	areq_ctx->src_offset = offset;

	if (req->src != req->dst) {
		size_for_map = req->assoclen + req->cryptlen;

		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
			size_for_map += authsize;
		else
			size_for_map -= authsize;

		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE,
			       &areq_ctx->dst.mapped_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
			       &dst_mapped_nents);
		if (rc)
			goto chain_data_exit;
	}

	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
					    &dst_last_bytes);
	sg_index = areq_ctx->dst_sgl->length;
	offset = size_to_skip;

	/* check where the data starts */
	while (dst_mapped_nents && sg_index <= size_to_skip) {
		dst_mapped_nents--;
		offset -= areq_ctx->dst_sgl->length;
		sgl = sg_next(areq_ctx->dst_sgl);
		if (!sgl)
			break;
		areq_ctx->dst_sgl = sgl;
		sg_index += areq_ctx->dst_sgl->length;
	}
	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->dst.nents = dst_mapped_nents;
	areq_ctx->dst_offset = offset;
	if (src_mapped_nents > 1 ||
	    dst_mapped_nents > 1 ||
	    do_chain) {
		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
					  &src_last_bytes, &dst_last_bytes,
					  is_last_table);
	} else {
		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
		cc_prepare_aead_data_dlli(req, &src_last_bytes,
					  &dst_last_bytes);
	}

chain_data_exit:
	return rc;
}

static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
				      struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 curr_mlli_size = 0;

	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
		curr_mlli_size = areq_ctx->assoc.mlli_nents *
				 LLI_ENTRY_BYTE_SIZE;
	}

	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		/* Inplace case: dst nents equal to src nents */
		if (req->src == req->dst) {
			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
						  curr_mlli_size;
			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
			if (!areq_ctx->is_single_pass)
				areq_ctx->assoc.mlli_nents +=
					areq_ctx->src.mlli_nents;
		} else {
			if (areq_ctx->gen_ctx.op_type ==
			    DRV_CRYPTO_DIRECTION_DECRYPT) {
				areq_ctx->src.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->dst.sram_addr =
					areq_ctx->src.sram_addr +
					areq_ctx->src.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->src.mlli_nents;
			} else {
				areq_ctx->dst.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->src.sram_addr =
					areq_ctx->dst.sram_addr +
					areq_ctx->dst.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->dst.mlli_nents;
			}
		}
	}
}

int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	unsigned int authsize = areq_ctx->req_authsize;
	int rc = 0;
	dma_addr_t dma_addr;
	u32 mapped_nents = 0;
	u32 dummy = 0; /* used for the assoc data fragments */
	u32 size_to_map;
	gfp_t flags = cc_gfp_flags(&req->base);

	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* copy the mac to a temporary location to deal with a possible
	 * data memory overwrite caused by a cache coherency problem.
	 */
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst)
		cc_copy_mac(dev, req, CC_SG_TO_BUF);

	/* calculate the size for the cipher; remove the ICV in decrypt */
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
			      DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - authsize);

	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
			MAX_MAC_SIZE, areq_ctx->mac_buf);
		rc = -ENOMEM;
		goto aead_map_failure;
	}
	areq_ctx->mac_buf_dma_addr = dma_addr;

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;

		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping ccm_iv0 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, addr);
			areq_ctx->ccm_iv0_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->ccm_iv0_dma_addr = dma_addr;

		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
					  &sg_data, areq_ctx->assoclen);
		if (rc)
			goto aead_map_failure;
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
					  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->hkey_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_block_len_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
	}

	size_to_map = req->cryptlen + req->assoclen;
	/* If we do in-place encryption, we also need the auth tag */
	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
	    (req->src == req->dst)) {
		size_to_map += authsize;
	}

	rc = cc_map_sg(dev, req->src, size_to_map,
		       (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL),
		       &areq_ctx->src.mapped_nents,
		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
			LLI_MAX_NUM_OF_DATA_ENTRIES),
		       &dummy, &mapped_nents);
	if (rc)
		goto aead_map_failure;

	if (areq_ctx->is_single_pass) {
		/*
		 * Create MLLI table for:
		 *   (1) Assoc. data
		 *   (2) Src/Dst SGLs
		 * Note: IV is a contig. buffer (not an SGL)
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
	} else { /* DOUBLE-PASS flow */
		/*
		 * Prepare MLLI table(s) in this order:
		 *
		 * If ENCRYPT/DECRYPT (inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src/dst (inplace operation)
		 *
		 * If ENCRYPT (non-inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for dst
		 *   (4) MLLI for src
		 *
		 * If DECRYPT (non-inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src
		 *   (4) MLLI for dst
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
		if (rc)
			goto aead_map_failure;
	}

	/* MLLI support - start building the MLLI according to the above
	 * results
	 */
	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto aead_map_failure;

		cc_update_aead_mlli_nents(drvdata, req);
		dev_dbg(dev, "assoc params mn %d\n",
			areq_ctx->assoc.mlli_nents);
		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
	}

	return 0;

aead_map_failure:
	cc_unmap_aead_request(dev, req);
	return rc;
}

int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
			      struct scatterlist *src, unsigned int nbytes,
			      bool do_update, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct buffer_array sg_data;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (nbytes == 0 && *curr_buff_cnt == 0) {
		/* nothing to do */
		return 0;
	}

	/* map the previous buffer */
	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
	}

	if (src && nbytes > 0 && do_update) {
		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (src && mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = nbytes;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	/* build mlli */
	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
				0, true, &areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	/* change the buffer index for the unmap function */
	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}

int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
			       struct scatterlist *src, unsigned int nbytes,
			       unsigned int block_size, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	u8 *next_buff = cc_next_buf(areq_ctx);
	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	unsigned int update_data_len;
	u32 total_in_len = nbytes + *curr_buff_cnt;
	struct buffer_array sg_data;
	unsigned int swap_index = 0;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	areq_ctx->curr_sg = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (total_in_len < block_size) {
		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
		return 1;
	}

	/* Calculate the residue size */
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* update data len */
	update_data_len = total_in_len - *next_buff_cnt;

	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
		*next_buff_cnt, update_data_len);

	/* Copy the new residue to next buffer */
	if (*next_buff_cnt) {
		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
			next_buff, (update_data_len - *curr_buff_cnt),
			*next_buff_cnt);
		cc_copy_sg_portion(dev, next_buff, src,
				   (update_data_len - *curr_buff_cnt),
				   nbytes, CC_SG_TO_BUF);
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (update_data_len > *curr_buff_cnt) {
		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
			       DMA_TO_DEVICE, &areq_ctx->in_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
			       &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = update_data_len;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
				(update_data_len - *curr_buff_cnt), 0, true,
				&areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}

void cc_unmap_hash_request(struct device *dev, void *ctx,
			   struct scatterlist *src, bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	u32 *prev_len = cc_next_buf_cnt(areq_ctx);

	/* In case a pool was set, a table was
	 * allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if (src && areq_ctx->in_nents) {
		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
		dma_unmap_sg(dev, src,
			     areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len) {
		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
			sg_virt(areq_ctx->buff_sg),
			&sg_dma_address(areq_ctx->buff_sg),
			sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update
			 * operation
			 */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}

int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
	struct device *dev = drvdata_to_dev(drvdata);

	drvdata->mlli_buffs_pool =
		dma_pool_create("dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (!drvdata->mlli_buffs_pool)
		return -ENOMEM;

	return 0;
}

int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
{
	dma_pool_destroy(drvdata->mlli_buffs_pool);
	return 0;
}