// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MID_KEY_SIZE	(3 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* SEC sqe(bd) bit operational relative MACRO */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET		5
#define SEC_AEAD_ALG_OFFSET	11
#define SEC_AUTH_OFFSET		6

#define SEC_DE_OFFSET_V3	9
#define SEC_SCENE_OFFSET_V3	5
#define SEC_CKEY_OFFSET_V3	13
#define SEC_CTR_CNT_OFFSET	25
#define SEC_CTR_CNT_ROLLOVER	2
#define SEC_SRC_SGL_OFFSET_V3	11
#define SEC_DST_SGL_OFFSET_V3	14
#define SEC_CALG_OFFSET_V3	4
#define SEC_AKEY_OFFSET_V3	9
#define SEC_MAC_OFFSET_V3	4
#define SEC_AUTH_ALG_OFFSET_V3	15
#define SEC_CIPHER_AUTH_V3	0xbf
#define SEC_AUTH_CIPHER_V3	0x40
#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001
#define SEC_ICV_MASK		0x000E
#define SEC_SQE_LEN_RATE_MASK	0x3

#define SEC_TOTAL_IV_SZ(depth)	(SEC_IV_SIZE * (depth))
#define SEC_SGL_SGE_NR		128
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_MAX_AAD_LEN		65535
#define SEC_MAX_CCM_AAD_LEN	65279
#define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth))

#define SEC_PBUF_SZ			512
#define SEC_PBUF_IV_OFFSET		SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET		(SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG			(SEC_PBUF_SZ + SEC_IV_SIZE +	\
					 SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM			(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM(depth)	((depth) / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ(depth)		(SEC_PBUF_PKG * ((depth) -	\
					 SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ(depth)	(PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) +	\
					 SEC_PBUF_LEFT_SZ(depth))
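
/*
 * Size arithmetic implied by the pbuf macros above: each pbuf package is
 * laid out as <data (512B) | IV | MAC (2 * 64B)>, i.e. SEC_PBUF_PKG is
 * 512 + SEC_IV_SIZE + 128 bytes (SEC_IV_SIZE comes from sec.h), and
 * SEC_PBUF_NUM is simply how many whole packages fit in one page.
 * SEC_TOTAL_PBUF_SZ(depth) is therefore "full pages" plus a tail holding
 * the remaining (depth % SEC_PBUF_NUM) packages.
 */
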
#define SEC_SQE_LEN_RATE	4
#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1
#define SEC_ICV_ERR		0x2
#define MIN_MAC_LEN		4
#define MAC_LEN_MASK		0x1U
#define MAX_INPUT_DATA_LEN	0xFFFE00
#define BITS_MASK		0xFF
#define BYTE_BITS		0x8
#define SEC_XTS_NAME_SZ		0x3
#define IV_CM_CAL_NUM		2
#define IV_CL_MASK		0x7
#define IV_CL_MIN		2
#define IV_CL_MID		4
#define IV_CL_MAX		8
#define IV_FLAGS_OFFSET		0x6
#define IV_CM_OFFSET		0x3
#define IV_LAST_BYTE1		1
#define IV_LAST_BYTE2		2
#define IV_LAST_BYTE_MASK	0xFF
#define IV_CTR_INIT		0x1
#define IV_BYTE_OFFSET		0x8

struct sec_skcipher {
	u64 alg_msk;
	struct skcipher_alg alg;
};

struct sec_aead {
	u64 alg_msk;
	struct aead_alg alg;
};

/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
				ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
				ctx->hlf_q_num;
}
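
/*
 * Queue partition implied by the helper above: the first hlf_q_num queue
 * ids of a TFM serve encrypt requests and the second hlf_q_num serve
 * decrypt requests, each side picked round-robin via its own atomic
 * counter. As a concrete sketch (assuming hlf_q_num == 2 just for the
 * example), encrypt requests get ids 0, 1, 0, 1, ... while decrypt
 * requests get ids 2, 3, 2, 3, ...
 */
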
static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}

static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	spin_lock_bh(&qp_ctx->req_lock);
	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
	spin_unlock_bh(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(req->ctx->dev, "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;

	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) {
		dev_err(req->ctx->dev, "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	spin_lock_bh(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	spin_unlock_bh(&qp_ctx->req_lock);
}

static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
{
	struct sec_sqe *bd = resp;

	status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd->type2.done_flag) &
			SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le16_to_cpu(bd->type2.tag);
	status->err_type = bd->type2.error_type;

	return bd->type_cipher_auth & SEC_TYPE_MASK;
}

static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
{
	struct sec_sqe3 *bd3 = resp;

	status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd3->done_flag) &
			SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le64_to_cpu(bd3->tag);
	status->err_type = bd3->error_type;

	return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
}
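
/*
 * Both parsers above decode the same completion-word layout, per the masks
 * defined at the top of the file: bit 0 is the "done" flag (SEC_DONE_MASK),
 * bits 1-3 carry the ICV result (SEC_ICV_MASK, shifted right by one) and
 * bits 7-10 carry the completion flag (SEC_FLAG_MASK >> SEC_FLAG_OFFSET).
 * The BD type returned to the caller comes from the low nibble
 * (SEC_TYPE_MASK) of type_cipher_auth (v2) or bd_param (v3).
 */
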
static int sec_cb_status_check(struct sec_req *req,
			       struct bd_status *status)
{
	struct sec_ctx *ctx = req->ctx;

	if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
		dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n",
				    req->err_type, status->done);
		return -EIO;
	}

	if (unlikely(ctx->alg_type == SEC_SKCIPHER)) {
		if (unlikely(status->flag != SEC_SQE_CFLAG)) {
			dev_err_ratelimited(ctx->dev, "flag[%u]\n",
					    status->flag);
			return -EIO;
		}
	} else if (unlikely(ctx->alg_type == SEC_AEAD)) {
		if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
			     status->icv == SEC_ICV_ERR)) {
			dev_err_ratelimited(ctx->dev,
					    "flag[%u], icv[%u]\n",
					    status->flag, status->icv);
			return -EBADMSG;
		}
	}

	return 0;
}

static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
	u8 type_supported = qp_ctx->ctx->type_supported;
	struct bd_status status;
	struct sec_ctx *ctx;
	struct sec_req *req;
	int err;
	u8 type;

	if (type_supported == SEC_BD_TYPE2) {
		type = pre_parse_finished_bd(&status, resp);
		req = qp_ctx->req_list[status.tag];
	} else {
		type = pre_parse_finished_bd3(&status, resp);
		req = (void *)(uintptr_t)status.tag;
	}

	if (unlikely(type != type_supported)) {
		atomic64_inc(&dfx->err_bd_cnt);
		pr_err("err bd type [%u]\n", type);
		return;
	}

	if (unlikely(!req)) {
		atomic64_inc(&dfx->invalid_req_cnt);
		atomic_inc(&qp->qp_status.used);
		return;
	}

	req->err_type = status.err_type;
	ctx = req->ctx;
	err = sec_cb_status_check(req, &status);
	if (err)
		atomic64_inc(&dfx->done_flag_cnt);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
}

static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return -EBUSY;

	spin_lock_bh(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		spin_unlock_bh(&qp_ctx->req_lock);
		return -EBUSY;
	}
	spin_unlock_bh(&qp_ctx->req_lock);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;

	if (likely(!ret)) {
		ret = -EINPROGRESS;
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
	}

	return ret;
}
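
/*
 * Return convention of sec_bd_send(): -EBUSY when the queue is saturated
 * and the request may not be backlogged, -ENOBUFS when hisi_qp_send()
 * itself reports -EBUSY, and -EINPROGRESS once the BD has been queued to
 * hardware. A backlogged request also returns -EBUSY to the caller and is
 * notified with -EINPROGRESS later when the queue drains (see
 * sec_back_req_clear() and the completion callbacks below).
 */
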
/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < q_depth; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
				  res->c_ivin, res->c_ivin_dma);
}

static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int i;

	res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
					 &res->a_ivin_dma, GFP_KERNEL);
	if (!res->a_ivin)
		return -ENOMEM;

	for (i = 1; i < q_depth; i++) {
		res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
		res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->a_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
				  res->a_ivin, res->a_ivin_dma);
}

static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < q_depth; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}

static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1,
				  res->out_mac, res->out_mac_dma);
}

static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->pbuf)
		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth),
				  res->pbuf, res->pbuf_dma);
}

/*
 * To improve performance, a pre-allocated pbuffer is used for small
 * packets (< 512 bytes) instead of mapping them through the IOMMU for
 * every request.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int size = SEC_PBUF_PAGE_NUM(q_depth);
	int pbuf_page_offset;
	int i, j, k;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth),
				       &res->pbuf_dma, GFP_KERNEL);
	if (!res->pbuf)
		return -ENOMEM;

	/*
	 * SEC_PBUF_PKG contains data pbuf, iv and
	 * out_mac : <SEC_PBUF|SEC_IV|SEC_MAC>
	 * Every PAGE contains six SEC_PBUF_PKG
	 * The sec_qp_ctx contains QM_Q_DEPTH numbers of SEC_PBUF_PKG
	 * So we need SEC_PBUF_PAGE_NUM numbers of PAGE
	 * for the SEC_TOTAL_PBUF_SZ
	 */
	for (i = 0; i <= size; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			if (k == q_depth)
				break;
			res[k].pbuf = res->pbuf +
				j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
				j * SEC_PBUF_PKG + pbuf_page_offset;
		}
	}

	return 0;
}
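
/*
 * Indexing sketch for the loop above: package k (0 <= k < q_depth) lives in
 * page-sized slice i = k / SEC_PBUF_NUM of the allocation, at offset
 * j * SEC_PBUF_PKG where j = k % SEC_PBUF_NUM. The leftover bytes at the
 * end of each full slice are padding, which is why SEC_TOTAL_PBUF_SZ() is
 * computed as whole pages plus a tail rather than q_depth * SEC_PBUF_PKG.
 */
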
static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct sec_alg_res *res = qp_ctx->res;
	struct device *dev = ctx->dev;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_aiv_resource(dev, res);
		if (ret)
			goto alloc_aiv_fail;

		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_mac_fail;
	}
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
		if (ret) {
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_pbuf_fail;
		}
	}

	return 0;

alloc_pbuf_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
alloc_mac_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_aiv_resource(dev, res);
alloc_aiv_fail:
	sec_free_civ_resource(dev, res);
	return ret;
}

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
}

static int sec_alloc_qp_ctx_resource(struct hisi_qm *qm, struct sec_ctx *ctx,
				     struct sec_qp_ctx *qp_ctx)
{
	u16 q_depth = qp_ctx->qp->sq_depth;
	struct device *dev = ctx->dev;
	int ret = -ENOMEM;

	qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL);
	if (!qp_ctx->req_list)
		return ret;

	qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL);
	if (!qp_ctx->res)
		goto err_free_req_list;
	qp_ctx->res->depth = q_depth;

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_free_res;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	return 0;

err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_free_res:
	kfree(qp_ctx->res);
err_free_req_list:
	kfree(qp_ctx->req_list);
	return ret;
}

static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_alg_resource_free(ctx, qp_ctx);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
	kfree(qp_ctx->res);
	kfree(qp_ctx->req_list);
}

static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	qp->req_cb = sec_req_cb;

	spin_lock_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	ret = sec_alloc_qp_ctx_resource(qm, ctx, qp_ctx);
	if (ret)
		goto err_destroy_idr;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_resource_free;

	return 0;

err_resource_free:
	sec_free_qp_ctx_resource(ctx, qp_ctx);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);
	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	hisi_qm_stop_qp(qp_ctx->qp);
	sec_free_qp_ctx_resource(ctx, qp_ctx);
	idr_destroy(&qp_ctx->req_idr);
}

static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	ctx->qps = sec_create_qps();
	if (!ctx->qps) {
		pr_err("Can not create sec qps!\n");
		return -ENODEV;
	}

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->sec = sec;
	ctx->dev = &sec->qm.pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;

	/* Half of queue depth is taken as fake requests limit in the queue. */
	ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx) {
		ret = -ENOMEM;
		goto err_destroy_qps;
	}

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
	kfree(ctx->qp_ctx);
err_destroy_qps:
	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
	return ret;
}

static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
	kfree(ctx->qp_ctx);
}

static int sec_cipher_init(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}

static void sec_cipher_uninit(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);
}

static int sec_auth_init(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);
	if (!a_ctx->a_key)
		return -ENOMEM;

	return 0;
}

static void sec_auth_uninit(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
}

static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
{
	const char *alg = crypto_tfm_alg_name(&tfm->base);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->fallback = false;

	/* Currently, only XTS mode need fallback tfm when using 192bit key */
	if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
		return 0;

	c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
						  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(c_ctx->fbtfm)) {
		pr_err("failed to alloc xts mode fallback tfm!\n");
		return PTR_ERR(c_ctx->fbtfm);
	}

	return 0;
}

static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error skcipher iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	ret = sec_skcipher_fbtfm_init(tfm);
	if (ret)
		goto err_fbtfm_init;

	return 0;

err_fbtfm_init:
	sec_cipher_uninit(ctx);
err_cipher_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->c_ctx.fbtfm)
		crypto_free_sync_skcipher(ctx->c_ctx.fbtfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key,
				    const u32 keylen,
				    const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int ret;

	ret = verify_skcipher_des3_key(tfm, key);
	if (ret)
		return ret;

	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MID_KEY_SIZE:
			c_ctx->fallback = true;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		if (c_ctx->c_alg == SEC_CALG_SM4 &&
		    keylen != AES_KEYSIZE_128) {
			pr_err("hisi_sec2: sm4 key error!\n");
			return -EINVAL;
		} else {
			switch (keylen) {
			case AES_KEYSIZE_128:
				c_ctx->c_key_len = SEC_CKEY_128BIT;
				break;
			case AES_KEYSIZE_192:
				c_ctx->c_key_len = SEC_CKEY_192BIT;
				break;
			case AES_KEYSIZE_256:
				c_ctx->c_key_len = SEC_CKEY_256BIT;
				break;
			default:
				pr_err("hisi_sec2: aes key error!\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(dev, "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		return -EINVAL;
	}

	if (ret) {
		dev_err(dev, "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);
	if (c_ctx->fallback && c_ctx->fbtfm) {
		ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
		if (ret) {
			dev_err(dev, "failed to set fallback skcipher key!\n");
			return ret;
		}
	}

	return 0;
}

#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(aes_ofb, SEC_CALG_AES, SEC_CMODE_OFB)
GEN_SEC_SETKEY_FUNC(aes_cfb, SEC_CALG_AES, SEC_CMODE_CFB)
GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_ofb, SEC_CALG_SM4, SEC_CMODE_OFB)
GEN_SEC_SETKEY_FUNC(sm4_cfb, SEC_CALG_SM4, SEC_CMODE_CFB)
GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
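
/*
 * For reference, one concrete expansion of the generator macro above:
 * GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC) defines
 *
 *	static int sec_setkey_aes_cbc(struct crypto_skcipher *tfm,
 *				      const u8 *key, u32 keylen)
 *	{
 *		return sec_skcipher_setkey(tfm, key, keylen, SEC_CALG_AES,
 *					   SEC_CMODE_CBC);
 *	}
 *
 * The generated sec_setkey_*() helpers are the per-algorithm .setkey
 * callbacks used by the skcipher algorithm definitions later in the file.
 */
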
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
			       struct scatterlist *src)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aead_req = a_req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = ctx->dev;
	int copy_size, pbuf_length;
	int req_id = req->req_id;
	struct crypto_aead *tfm;
	size_t authsize;
	u8 *mac_offset;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = aead_req->cryptlen + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
					qp_ctx->res[req_id].pbuf, copy_size);
	if (unlikely(pbuf_length != copy_size)) {
		dev_err(dev, "copy src data to pbuf error!\n");
		return -EINVAL;
	}
	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
		tfm = crypto_aead_reqtfm(aead_req);
		authsize = crypto_aead_authsize(tfm);
		mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
		memcpy(a_req->out_mac, mac_offset, authsize);
	}

	req->in_dma = qp_ctx->res[req_id].pbuf_dma;
	c_req->c_out_dma = req->in_dma;

	return 0;
}

static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
				  struct scatterlist *dst)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = c_req->c_len + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
					  qp_ctx->res[req_id].pbuf, copy_size);
	if (unlikely(pbuf_length != copy_size))
		dev_err(ctx->dev, "copy pbuf data to dst error!\n");
}

static int sec_aead_mac_init(struct sec_aead_req *req)
{
	struct aead_request *aead_req = req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 *mac_out = req->out_mac;
	struct scatterlist *sgl = aead_req->src;
	size_t copy_size;
	off_t skip_size;

	/* Copy input mac */
	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
				       authsize, skip_size);
	if (unlikely(copy_size != authsize))
		return -EINVAL;

	return 0;
}
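
/*
 * Note on sec_aead_mac_init(): it is called only on the AEAD decrypt path
 * (see sec_cipher_map() below). The ICV at the tail of the source
 * scatterlist (the last "authsize" bytes of assoclen + cryptlen) is copied
 * into the out_mac buffer, whose DMA address is later programmed into the
 * BD's mac_addr field; an ICV mismatch is then reported as SEC_ICV_ERR and
 * turned into -EBADMSG by sec_cb_status_check().
 */
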
static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
	struct device *dev = ctx->dev;
	int ret;

	if (req->use_pbuf) {
		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
		if (ctx->alg_type == SEC_AEAD) {
			a_req->a_ivin = res->a_ivin;
			a_req->a_ivin_dma = res->a_ivin_dma;
			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
			a_req->out_mac_dma = res->pbuf_dma +
					SEC_PBUF_MAC_OFFSET;
		}
		ret = sec_cipher_pbuf_map(ctx, req, src);

		return ret;
	}
	c_req->c_ivin = res->c_ivin;
	c_req->c_ivin_dma = res->c_ivin_dma;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->a_ivin = res->a_ivin;
		a_req->a_ivin_dma = res->a_ivin_dma;
		a_req->out_mac = res->out_mac;
		a_req->out_mac_dma = res->out_mac_dma;
	}

	req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						qp_ctx->c_in_pool,
						req->req_id,
						&req->in_dma);
	if (IS_ERR(req->in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(req->in);
	}

	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
		ret = sec_aead_mac_init(a_req);
		if (unlikely(ret)) {
			dev_err(dev, "fail to init mac data for ICV!\n");
			return ret;
		}
	}

	if (dst == src) {
		c_req->c_out = req->in;
		c_req->c_out_dma = req->in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);

		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, req->in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}

static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct device *dev = ctx->dev;

	if (req->use_pbuf) {
		sec_cipher_pbuf_unmap(ctx, req, dst);
	} else {
		if (dst != src)
			hisi_acc_sg_buf_unmap(dev, src, req->in);

		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
	}
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(ctx, req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}

static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
{
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
		break;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
		break;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		break;
	default:
		pr_err("hisi_sec2: aead aes key error!\n");
		return -EINVAL;
	}
	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

	return 0;
}

static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
{
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	int blocksize, digestsize, ret;

	if (!keys->authkeylen) {
		pr_err("hisi_sec2: aead auth key error!\n");
		return -EINVAL;
	}

	blocksize = crypto_shash_blocksize(hash_tfm);
	digestsize = crypto_shash_digestsize(hash_tfm);
	if (keys->authkeylen > blocksize) {
		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
					      keys->authkeylen, ctx->a_key);
		if (ret) {
			pr_err("hisi_sec2: aead auth digest error!\n");
			return -EINVAL;
		}
		ctx->a_key_len = digestsize;
	} else {
		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;
	}

	return 0;
}

static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	if (unlikely(a_ctx->fallback_aead_tfm))
		return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);

	return 0;
}

static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
				    struct crypto_aead *tfm, const u8 *key,
				    unsigned int keylen)
{
	crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(a_ctx->fallback_aead_tfm,
			      crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
	return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen);
}

static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_mac_len mac_len,
			   const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int ret;

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->a_ctx.mac_len = mac_len;
	c_ctx->c_mode = c_mode;

	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		if (ret) {
			dev_err(dev, "set sec aes ccm cipher key err!\n");
			return ret;
		}
		memcpy(c_ctx->c_key, key, keylen);

		if (unlikely(a_ctx->fallback_aead_tfm)) {
			ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
			if (ret)
				return ret;
		}

		return 0;
	}

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	ret = sec_aead_aes_set_key(c_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec cipher key err!\n");
		goto bad_key;
	}

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec auth key err!\n");
		goto bad_key;
	}

	if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
	    (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
		dev_err(dev, "MAC or AUTH key length error!\n");
		goto bad_key;
	}

	return 0;

bad_key:
	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
	return -EINVAL;
}

#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
			     u32 keylen)				\
{									\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
}

GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
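
/*
 * As with GEN_SEC_SETKEY_FUNC(), each line above stamps out a thin
 * wrapper; e.g. sec_setkey_aes_gcm() simply calls
 * sec_aead_setkey(tfm, key, keylen, 0, SEC_CALG_AES, SEC_HMAC_GCM_MAC,
 * SEC_CMODE_GCM). The CCM/GCM variants pass 0 as the auth algorithm
 * because in sec_aead_setkey() those modes take the raw cipher key
 * directly instead of going through crypto_authenc_extractkeys().
 */
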
static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(ctx, req, aq->src, aq->dst);
}

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (unlikely(ret))
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);
	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}

static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	/* Set destination and source address type */
	if (req->use_pbuf) {
		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
	} else {
		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	}

	sec_sqe->sdm_addr_type |= da_type;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (req->in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}

static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	u32 bd_param = 0;
	u16 cipher;

	memset(sec_sqe3, 0, sizeof(struct sec_sqe3));

	sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
	sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
						c_ctx->c_mode;
	sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET_V3);

	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC;
	else
		cipher = SEC_CIPHER_DEC;
	sec_sqe3->c_icv_key |= cpu_to_le16(cipher);

	/* Set the CTR counter mode is 128bit rollover */
	sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER <<
					SEC_CTR_CNT_OFFSET);

	if (req->use_pbuf) {
		bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
		bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
	} else {
		bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3;
		bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3;
	}

	bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
	if (req->in_dma != c_req->c_out_dma)
		bd_param |= 0x1 << SEC_DE_OFFSET_V3;

	bd_param |= SEC_BD_TYPE3;
	sec_sqe3->bd_param = cpu_to_le32(bd_param);

	sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len);
	sec_sqe3->tag = cpu_to_le64(req);

	return 0;
}

/* increment counter (128-bit int) */
static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
{
	do {
		--bits;
		nums += counter[bits];
		counter[bits] = nums & BITS_MASK;
		nums >>= BYTE_BITS;
	} while (bits && nums);
}
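
/*
 * Worked example for ctr_iv_inc(): with a 16-byte counter ending in
 * ... 0x00 0xff and nums == 1, the last byte becomes 0x00 and a carry of 1
 * is added into the previous byte, exactly like adding "nums" to a
 * big-endian 128-bit integer. The loop stops as soon as there is nothing
 * left to carry or the most significant byte has been processed.
 */
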
static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;
	size_t sz;
	u8 *iv;

	if (req->c_req.encrypt)
		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
	else
		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;

	if (alg_type == SEC_SKCIPHER) {
		iv = sk_req->iv;
		cryptlen = sk_req->cryptlen;
	} else {
		iv = aead_req->iv;
		cryptlen = aead_req->cryptlen;
	}

	if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
		sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
					cryptlen - iv_size);
		if (unlikely(sz != iv_size))
			dev_err(req->ctx->dev, "copy output iv error!\n");
	} else {
		sz = cryptlen / iv_size;
		if (cryptlen % iv_size)
			sz += 1;
		ctr_iv_inc(iv, iv_size, sz);
	}
}
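
/*
 * IV chaining summary for sec_update_iv(): in CBC mode the next IV is the
 * last ciphertext block, so it is copied back out of the scatterlist
 * (destination on encrypt, source on decrypt); in CTR mode the IV is
 * advanced by the number of blocks consumed, i.e. cryptlen / iv_size
 * rounded up, via ctr_iv_inc().
 */
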
static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
					  struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *backlog_req = NULL;

	spin_lock_bh(&qp_ctx->req_lock);
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
		backlog_req = list_first_entry(&qp_ctx->backlog,
				typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	}
	spin_unlock_bh(&qp_ctx->req_lock);

	return backlog_req;
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct skcipher_request *backlog_sk_req;
	struct sec_req *backlog_req;

	sec_free_req_id(req);

	/* IV output at encrypt of CBC/CTR mode */
	if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	while (1) {
		backlog_req = sec_back_req_clear(ctx, qp_ctx);
		if (!backlog_req)
			break;

		backlog_sk_req = backlog_req->c_req.sk_req;
		backlog_sk_req->base.complete(&backlog_sk_req->base,
					      -EINPROGRESS);
		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
	}

	sk_req->base.complete(&sk_req->base, err);
}

static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	size_t authsize = ctx->a_ctx.mac_len;
	u32 data_size = aead_req->cryptlen;
	u8 flage = 0;
	u8 cm, cl;

	/* the specification has been checked in aead_iv_demension_check() */
	cl = c_req->c_ivin[0] + 1;
	c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00;
	memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl);
	c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT;

	/* the last 3bit is L' */
	flage |= c_req->c_ivin[0] & IV_CL_MASK;
	/* the M' is bit3~bit5, the Flags is bit6 */
	cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM;
	flage |= cm << IV_CM_OFFSET;
	if (aead_req->assoclen)
		flage |= 0x01 << IV_FLAGS_OFFSET;

	memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize);
	a_req->a_ivin[0] = flage;

	/*
	 * the last 32bit is counter's initial number,
	 * but the nonce uses the first 16bit
	 * the tail 16bit fill with the cipher length
	 */
	if (!c_req->encrypt)
		data_size = aead_req->cryptlen - authsize;

	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] =
			data_size & IV_LAST_BYTE_MASK;
	data_size >>= IV_BYTE_OFFSET;
	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] =
			data_size & IV_LAST_BYTE_MASK;
}
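
/*
 * CCM flag-byte layout built by set_aead_auth_iv(), following the bit
 * positions used above: bits 0-2 hold L' (IV_CL_MASK), bits 3-5 hold
 * M' = (authsize - 2) / 2 (IV_CM_OFFSET / IV_CM_CAL_NUM) and bit 6 is the
 * Adata flag (IV_FLAGS_OFFSET), set whenever assoclen is non-zero. The
 * last two bytes of the auth IV carry the plaintext length in big-endian
 * order.
 */
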
  1264. static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
  1265. {
  1266. struct aead_request *aead_req = req->aead_req.aead_req;
  1267. struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
  1268. size_t authsize = crypto_aead_authsize(tfm);
  1269. struct sec_cipher_req *c_req = &req->c_req;
  1270. struct sec_aead_req *a_req = &req->aead_req;
  1271. memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
  1272. if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) {
  1273. /*
  1274. * CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter},
  1275. * the counter must set to 0x01
  1276. */
  1277. ctx->a_ctx.mac_len = authsize;
  1278. /* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */
  1279. set_aead_auth_iv(ctx, req);
  1280. }
  1281. /* GCM 12Byte Cipher_IV == Auth_IV */
  1282. if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
  1283. ctx->a_ctx.mac_len = authsize;
  1284. memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
  1285. }
  1286. }
static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
				 struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;

	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);

	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
	sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
	sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET;

	if (dir)
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	else
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen);
	sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0);
	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
				    struct sec_req *req, struct sec_sqe3 *sqe3)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;

	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);

	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sqe3->a_key_addr = sqe3->c_key_addr;
	sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
	sqe3->auth_mac_key |= SEC_NO_AUTH;

	if (dir)
		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
	else
		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;

	sqe3->a_len_key = cpu_to_le32(aq->assoclen);
	sqe3->auth_src_offset = cpu_to_le16(0x0);
	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
}
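
/*
 * Fill the authentication part of the BD for the authenc() algorithms:
 * authentication key address, MAC and key lengths, hash algorithm, the
 * cipher-then-auth vs auth-then-cipher ordering and the MAC address.
 */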
static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
				struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg =
			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

	if (dir) {
		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	} else {
		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET;
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
	}

	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);
	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}
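
/* Fill a complete AEAD BD: the cipher fields first, then the auth fields. */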
static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd fill is error!\n");
		return ret;
	}

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
	else
		sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);

	return 0;
}

static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
				   struct sec_req *req, struct sec_sqe3 *sqe3)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->mac_len /
			SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->a_key_len /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);

	if (dir) {
		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
	} else {
		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
	}

	sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);
	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
	int ret;

	ret = sec_skcipher_bd_fill_v3(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd3 fill is error!\n");
		return ret;
	}

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
					req, sec_sqe3);
	else
		sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
				       req, sec_sqe3);

	return 0;
}
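
/*
 * AEAD completion handler: update the IV after CBC encryption, copy the
 * computed MAC behind the ciphertext on encryption, release the request
 * id, restart any backlogged requests and complete the original request.
 */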
static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct aead_request *backlog_aead_req;
	struct sec_req *backlog_req;
	size_t sz;

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
		sec_update_iv(req, SEC_AEAD);

	/* Copy output mac */
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;

		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
					  aead_req->out_mac,
					  authsize, a_req->cryptlen +
					  a_req->assoclen);
		if (unlikely(sz != authsize)) {
			dev_err(c->dev, "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	sec_free_req_id(req);

	while (1) {
		backlog_req = sec_back_req_clear(c, qp_ctx);
		if (!backlog_req)
			break;

		backlog_aead_req = backlog_req->aead_req.aead_req;
		backlog_aead_req->base.complete(&backlog_aead_req->base,
						-EINPROGRESS);
		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
	}

	a_req->base.complete(&a_req->base, err);
}

static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);
}
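
/* Bind the request to a queue pair and reserve a request id on it. */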
static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int queue_id;

	/* To load balance */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (unlikely(req->req_id < 0)) {
		sec_free_queue_id(ctx, req);
		return req->req_id;
	}

	return 0;
}
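
/*
 * Common processing path shared by the skcipher and AEAD ops: init the
 * request, transfer the buffers and IV, then fill and send the BD.
 * -EINPROGRESS, or -EBUSY on a backlog-capable request, is the expected
 * result of bd_send(); anything else unwinds the request.
 */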
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_req *c_req = &req->c_req;
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/* Output the IV at decryption of CBC/CTR mode */
	if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR))
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
		(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the IV passed in by the user */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);

	return ret;
}

static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_set_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_skcipher_req_ops_v3 = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill_v3,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops_v3 = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_set_iv,
	.bd_fill	= sec_aead_bd_fill_v3,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};
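
/* Select the BD type and request ops according to the QM hardware version. */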
static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_skcipher_init(tfm);
	if (ret)
		return ret;

	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_skcipher_req_ops;
	} else {
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_skcipher_req_ops_v3;
	}

	return ret;
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}

static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
	    ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error aead iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_aead_req_ops;
	} else {
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_aead_req_ops_v3;
	}

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return ret;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_aead_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		pr_err("hisi_sec2: aead init error!\n");
		return ret;
	}

	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(auth_ctx->hash_tfm)) {
		dev_err(ctx->dev, "aead alloc shash error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(auth_ctx->hash_tfm);
	}

	return 0;
}

static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_shash(ctx->a_ctx.hash_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	const char *aead_name = alg->base.cra_name;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
		return ret;
	}

	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
						     CRYPTO_ALG_NEED_FALLBACK |
						     CRYPTO_ALG_ASYNC);
	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(a_ctx->fallback_aead_tfm);
	}
	a_ctx->fallback = false;

	return 0;
}

static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha1");
}

static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha256");
}

static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha512");
}
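
/*
 * Validate the input length against the cipher mode: XTS needs at least
 * one AES block, ECB/CBC need block-aligned input, and CFB/OFB/CTR are
 * only supported from HW V3 onwards.
 */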
static int sec_skcipher_cryptlen_ckeck(struct sec_ctx *ctx,
				       struct sec_req *sreq)
{
	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
	struct device *dev = ctx->dev;
	u8 c_mode = ctx->c_ctx.c_mode;
	int ret = 0;

	switch (c_mode) {
	case SEC_CMODE_XTS:
		if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
			dev_err(dev, "skcipher XTS mode input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_ECB:
	case SEC_CMODE_CBC:
		if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher AES input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_CFB:
	case SEC_CMODE_OFB:
	case SEC_CMODE_CTR:
		if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
			dev_err(dev, "skcipher HW version error!\n");
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst ||
		     sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}

	sreq->c_req.c_len = sk_req->cryptlen;

	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		return sec_skcipher_cryptlen_ckeck(ctx, sreq);
	}

	dev_err(dev, "skcipher algorithm error!\n");

	return -EINVAL;
}
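
/* Process the request synchronously through the software fallback tfm. */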
static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
				    struct skcipher_request *sreq, bool encrypt)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
	struct device *dev = ctx->dev;
	int ret;

	if (!c_ctx->fbtfm) {
		dev_err_ratelimited(dev, "the soft tfm isn't supported in the current system.\n");
		return -EINVAL;
	}

	skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);

	/* the software fallback runs crypto in synchronous mode */
	skcipher_request_set_callback(subreq, sreq->base.flags,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, sreq->src, sreq->dst,
				   sreq->cryptlen, sreq->iv);
	if (encrypt)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);

	return ret;
}

static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen) {
		if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
			return -EINVAL;
		return 0;
	}

	req->flag = sk_req->base.flags;
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	if (unlikely(ctx->c_ctx.fallback))
		return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}
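
/*
 * Template for the skcipher_alg entries in sec_skciphers[]; only the
 * setkey helper, the key/block/IV sizes and the context callbacks
 * differ per algorithm.
 */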
#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC |\
		 CRYPTO_ALG_NEED_FALLBACK,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
}

#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
	max_key_size, blk_size, iv_size) \
	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)

static struct sec_skcipher sec_skciphers[] = {
	{
		.alg_msk = BIT(0),
		.alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0),
	},
	{
		.alg_msk = BIT(1),
		.alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(2),
		.alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(3),
		.alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE,
					SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(4),
		.alg = SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(5),
		.alg = SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(12),
		.alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE,
					AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(13),
		.alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE,
					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(14),
		.alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE,
					SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(15),
		.alg = SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb, AES_MIN_KEY_SIZE,
					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(16),
		.alg = SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb, AES_MIN_KEY_SIZE,
					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(23),
		.alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
	},
	{
		.alg_msk = BIT(24),
		.alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE,
					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
					DES3_EDE_BLOCK_SIZE),
	},
};
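
/*
 * Check the CCM length field L' encoded in iv[0]: it must lie in the
 * supported range and, for the shorter encodings, the message length
 * must fit into that many length octets.
 */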
static int aead_iv_demension_check(struct aead_request *aead_req)
{
	u8 cl;

	cl = aead_req->iv[0] + 1;
	if (cl < IV_CL_MIN || cl > IV_CL_MAX)
		return -EINVAL;

	if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl))
		return -EOVERFLOW;

	return 0;
}
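
/*
 * Check the AEAD request against the engine limits: total input and AAD
 * lengths, MAC length per mode, the CCM IV/AAD constraints and CBC block
 * alignment; the cipher length is derived from cryptlen here as well.
 */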
static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 c_mode = ctx->c_ctx.c_mode;
	struct device *dev = ctx->dev;
	int ret;

	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
	    req->assoclen > SEC_MAX_AAD_LEN)) {
		dev_err(dev, "aead input spec error!\n");
		return -EINVAL;
	}

	if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
	    (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
	     authsize & MAC_LEN_MASK)))) {
		dev_err(dev, "aead input mac length error!\n");
		return -EINVAL;
	}

	if (c_mode == SEC_CMODE_CCM) {
		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
			dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
			return -EINVAL;
		}
		ret = aead_iv_demension_check(req);
		if (ret) {
			dev_err(dev, "aead input iv param error!\n");
			return ret;
		}
	}

	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
	else
		sreq->c_req.c_len = req->cryptlen - authsize;
	if (c_mode == SEC_CMODE_CBC) {
		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "aead crypto length error!\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!req->src || !req->dst)) {
		dev_err(dev, "aead input param error!\n");
		return -EINVAL;
	}

	if (ctx->sec->qm.ver == QM_HW_V2) {
		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
		    req->cryptlen <= authsize))) {
			ctx->a_ctx.fallback = true;
			return -EINVAL;
		}
	}

	/* Support AES or SM4 */
	if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
		dev_err(dev, "aead crypto alg error!\n");
		return -EINVAL;
	}

	if (unlikely(sec_aead_spec_check(ctx, sreq)))
		return -EINVAL;

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
		SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	return 0;
}
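
/* Hand the request over to the fallback AEAD tfm and run it there. */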
static int sec_aead_soft_crypto(struct sec_ctx *ctx,
				struct aead_request *aead_req,
				bool encrypt)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct device *dev = ctx->dev;
	struct aead_request *subreq;
	int ret;

	/* Kunpeng920 AEAD mode does not support zero-length input */
	if (!a_ctx->fallback_aead_tfm) {
		dev_err(dev, "aead fallback tfm is NULL!\n");
		return -EINVAL;
	}

	subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
	if (!subreq)
		return -ENOMEM;

	aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
	aead_request_set_callback(subreq, aead_req->base.flags,
				  aead_req->base.complete, aead_req->base.data);
	aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
			       aead_req->cryptlen, aead_req->iv);
	aead_request_set_ad(subreq, aead_req->assoclen);

	if (encrypt)
		ret = crypto_aead_encrypt(subreq);
	else
		ret = crypto_aead_decrypt(subreq);
	aead_request_free(subreq);

	return ret;
}

static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret)) {
		if (ctx->a_ctx.fallback)
			return sec_aead_soft_crypto(ctx, a_req, encrypt);
		return -EINVAL;
	}

	return ctx->req_op->process(ctx, req);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}

#define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
		     ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC |\
		 CRYPTO_ALG_NEED_FALLBACK,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.setauthsize = sec_aead_setauthsize,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}

static struct sec_aead sec_aeads[] = {
	{
		.alg_msk = BIT(6),
		.alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(7),
		.alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(17),
		.alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(18),
		.alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(43),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1,
				    sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
	},
	{
		.alg_msk = BIT(44),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256,
				    sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
	},
	{
		.alg_msk = BIT(45),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512,
				    sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
	},
};

static void sec_unregister_skcipher(u64 alg_mask, int end)
{
	int i;

	for (i = 0; i < end; i++)
		if (sec_skciphers[i].alg_msk & alg_mask)
			crypto_unregister_skcipher(&sec_skciphers[i].alg);
}

static int sec_register_skcipher(u64 alg_mask)
{
	int i, ret, count;

	count = ARRAY_SIZE(sec_skciphers);

	for (i = 0; i < count; i++) {
		if (!(sec_skciphers[i].alg_msk & alg_mask))
			continue;

		ret = crypto_register_skcipher(&sec_skciphers[i].alg);
		if (ret)
			goto err;
	}

	return 0;

err:
	sec_unregister_skcipher(alg_mask, i);

	return ret;
}

static void sec_unregister_aead(u64 alg_mask, int end)
{
	int i;

	for (i = 0; i < end; i++)
		if (sec_aeads[i].alg_msk & alg_mask)
			crypto_unregister_aead(&sec_aeads[i].alg);
}

static int sec_register_aead(u64 alg_mask)
{
	int i, ret, count;

	count = ARRAY_SIZE(sec_aeads);

	for (i = 0; i < count; i++) {
		if (!(sec_aeads[i].alg_msk & alg_mask))
			continue;

		ret = crypto_register_aead(&sec_aeads[i].alg);
		if (ret)
			goto err;
	}

	return 0;

err:
	sec_unregister_aead(alg_mask, i);

	return ret;
}
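
/*
 * Register with the crypto API every skcipher and AEAD algorithm that
 * the device's algorithm bitmap marks as supported; unwind the skciphers
 * if AEAD registration fails.
 */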
int sec_register_to_crypto(struct hisi_qm *qm)
{
	u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
	int ret;

	ret = sec_register_skcipher(alg_mask);
	if (ret)
		return ret;

	ret = sec_register_aead(alg_mask);
	if (ret)
		sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));

	return ret;
}

void sec_unregister_from_crypto(struct hisi_qm *qm)
{
	u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);

	sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
}