cc_hash.c 66 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
  3. #include <linux/kernel.h>
  4. #include <linux/module.h>
  5. #include <crypto/algapi.h>
  6. #include <crypto/hash.h>
  7. #include <crypto/md5.h>
  8. #include <crypto/sm3.h>
  9. #include <crypto/internal/hash.h>
  10. #include "cc_driver.h"
  11. #include "cc_request_mgr.h"
  12. #include "cc_buffer_mgr.h"
  13. #include "cc_hash.h"
  14. #include "cc_sram_mgr.h"
  15. #define CC_MAX_HASH_SEQ_LEN 12
  16. #define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
  17. #define CC_SM3_HASH_LEN_SIZE 8
/*
 * Driver-global hash state: SRAM locations of the constant tables
 * (written once at init) plus the list of registered hash algs.
 */
struct cc_hash_handle {
	u32 digest_len_sram_addr; /* const value in SRAM */
	u32 larval_digest_sram_addr; /* const value in SRAM */
	struct list_head hash_list; /* registered cc_hash_alg entries */
};
/*
 * Initial "bytes hashed so far" counter loaded into the HW for HMAC
 * flows: 0x40 (64) for the 64-byte-block hashes. Presumably this
 * accounts for the ipad block already absorbed at setkey time —
 * confirm against the setkey flow.
 */
static const u32 cc_digest_len_init[] = {
	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
/*
 * MD5 larval (initial) digest. The MD5 IV words coincide with the
 * first four SHA-1 IV words, so the SHA1_H* constants are reused here;
 * like the other tables below, the words are stored in reverse order
 * (H3 first).
 */
static const u32 cc_md5_init[] = {
	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
/* SHA-1 larval digest, words stored H4-first */
static const u32 cc_sha1_init[] = {
	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
/* SHA-224 larval digest, words stored H7-first */
static const u32 cc_sha224_init[] = {
	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
/* SHA-256 larval digest, words stored H7-first */
static const u32 cc_sha256_init[] = {
	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
/* As cc_digest_len_init, but 0x80 (128) for the 128-byte-block hashes */
static const u32 cc_digest_len_sha512_init[] = {
	0x00000080, 0x00000000, 0x00000000, 0x00000000 };

/*
 * Due to the way the HW works, every double word in the SHA384 and SHA512
 * larval hashes must be stored in hi/lo order
 */
#define hilo(x) upper_32_bits(x), lower_32_bits(x)
static const u32 cc_sha384_init[] = {
	hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
	hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
static const u32 cc_sha512_init[] = {
	hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
	hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };
/* SM3 larval digest, words stored IVH-first */
static const u32 cc_sm3_init[] = {
	SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
	SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
  51. static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
  52. unsigned int *seq_size);
  53. static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
  54. unsigned int *seq_size);
  55. static const void *cc_larval_digest(struct device *dev, u32 mode);
/* Registration record tying one ahash algorithm to its HW parameters */
struct cc_hash_alg {
	struct list_head entry;	/* list link (presumably cc_hash_handle::hash_list — confirm) */
	int hash_mode;		/* DRV_HASH_* software mode */
	int hw_mode;		/* HW hash/cipher mode used in descriptors */
	int inter_digestsize;	/* intermediate digest size in bytes */
	struct cc_drvdata *drvdata;
	struct ahash_alg ahash_alg;
};
/* HMAC key material being processed by an async setkey request */
struct hash_key_req_ctx {
	u32 keylen;		/* key length in bytes */
	dma_addr_t key_dma_addr; /* DMA mapping of @key (0 when unmapped) */
	u8 *key;		/* local copy of the caller's key */
};
/* hash per-session (tfm) context */
struct cc_hash_ctx {
	struct cc_drvdata *drvdata;
	/*
	 * Holds the origin digest: the digest after "setkey" if HMAC,
	 * the initial (larval) digest if plain HASH.
	 */
	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
	u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;

	dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
	dma_addr_t digest_buff_dma_addr;
	/* used for HMAC with a key larger than the mode's block size */
	struct hash_key_req_ctx key_params;
	int hash_mode;		/* DRV_HASH_* software mode */
	int hw_mode;		/* HW hash/cipher mode used in descriptors */
	int inter_digestsize;	/* intermediate digest size in bytes */
	unsigned int hash_len;	/* size in bytes of the HW running-length field */
	struct completion setkey_comp;	/* signalled when the setkey HW flow completes */
	bool is_hmac;		/* true for the keyed (HMAC/MAC) variant */
};
  88. static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
  89. unsigned int flow_mode, struct cc_hw_desc desc[],
  90. bool is_not_last_data, unsigned int *seq_size);
  91. static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
  92. {
  93. if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
  94. mode == DRV_HASH_SHA512) {
  95. set_bytes_swap(desc, 1);
  96. } else {
  97. set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
  98. }
  99. }
  100. static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
  101. unsigned int digestsize)
  102. {
  103. state->digest_result_dma_addr =
  104. dma_map_single(dev, state->digest_result_buff,
  105. digestsize, DMA_BIDIRECTIONAL);
  106. if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
  107. dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
  108. digestsize);
  109. return -ENOMEM;
  110. }
  111. dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
  112. digestsize, state->digest_result_buff,
  113. &state->digest_result_dma_addr);
  114. return 0;
  115. }
/*
 * Initialize the per-request hash state: zero it, then seed the
 * intermediate digest (and, for HMAC, the running length and the opad
 * digest) from the session context or the larval digest tables.
 */
static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
			struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	memset(state, 0, sizeof(*state));

	if (is_hmac) {
		/* XCBC/CMAC have no digest-length state to seed */
		if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
		    ctx->hw_mode != DRV_CIPHER_CMAC) {
			/* the ctx buffer was DMA-written at setkey time;
			 * sync before the CPU reads it
			 */
			dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);

			memcpy(state->digest_buff, ctx->digest_buff,
			       ctx->inter_digestsize);
			/* SHA-384/512 use the 128-byte-block length seed */
			if (ctx->hash_mode == DRV_HASH_SHA512 ||
			    ctx->hash_mode == DRV_HASH_SHA384)
				memcpy(state->digest_bytes_len,
				       cc_digest_len_sha512_init,
				       ctx->hash_len);
			else
				memcpy(state->digest_bytes_len,
				       cc_digest_len_init,
				       ctx->hash_len);
		}

		if (ctx->hash_mode != DRV_HASH_NULL) {
			dma_sync_single_for_cpu(dev,
						ctx->opad_tmp_keys_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);
			memcpy(state->opad_digest_buff,
			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
		}
	} else { /*hash*/
		/* Copy the initial digests if hash flow. */
		const void *larval = cc_larval_digest(dev, ctx->hash_mode);

		memcpy(state->digest_buff, larval, ctx->inter_digestsize);
	}
}
  153. static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
  154. struct cc_hash_ctx *ctx)
  155. {
  156. bool is_hmac = ctx->is_hmac;
  157. state->digest_buff_dma_addr =
  158. dma_map_single(dev, state->digest_buff,
  159. ctx->inter_digestsize, DMA_BIDIRECTIONAL);
  160. if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
  161. dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
  162. ctx->inter_digestsize, state->digest_buff);
  163. return -EINVAL;
  164. }
  165. dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
  166. ctx->inter_digestsize, state->digest_buff,
  167. &state->digest_buff_dma_addr);
  168. if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
  169. state->digest_bytes_len_dma_addr =
  170. dma_map_single(dev, state->digest_bytes_len,
  171. HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
  172. if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
  173. dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
  174. HASH_MAX_LEN_SIZE, state->digest_bytes_len);
  175. goto unmap_digest_buf;
  176. }
  177. dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
  178. HASH_MAX_LEN_SIZE, state->digest_bytes_len,
  179. &state->digest_bytes_len_dma_addr);
  180. }
  181. if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
  182. state->opad_digest_dma_addr =
  183. dma_map_single(dev, state->opad_digest_buff,
  184. ctx->inter_digestsize,
  185. DMA_BIDIRECTIONAL);
  186. if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
  187. dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
  188. ctx->inter_digestsize,
  189. state->opad_digest_buff);
  190. goto unmap_digest_len;
  191. }
  192. dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
  193. ctx->inter_digestsize, state->opad_digest_buff,
  194. &state->opad_digest_dma_addr);
  195. }
  196. return 0;
  197. unmap_digest_len:
  198. if (state->digest_bytes_len_dma_addr) {
  199. dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
  200. HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
  201. state->digest_bytes_len_dma_addr = 0;
  202. }
  203. unmap_digest_buf:
  204. if (state->digest_buff_dma_addr) {
  205. dma_unmap_single(dev, state->digest_buff_dma_addr,
  206. ctx->inter_digestsize, DMA_BIDIRECTIONAL);
  207. state->digest_buff_dma_addr = 0;
  208. }
  209. return -EINVAL;
  210. }
/*
 * Release the DMA mappings created by cc_map_req(). Safe on a
 * partially mapped state: a zero address means "not mapped".
 */
static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
			 struct cc_hash_ctx *ctx)
{
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&state->digest_buff_dma_addr);
		state->digest_buff_dma_addr = 0;
	}
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
			&state->digest_bytes_len_dma_addr);
		state->digest_bytes_len_dma_addr = 0;
	}
	if (state->opad_digest_dma_addr) {
		dma_unmap_single(dev, state->opad_digest_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
			&state->opad_digest_dma_addr);
		state->opad_digest_dma_addr = 0;
	}
}
  236. static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
  237. unsigned int digestsize, u8 *result)
  238. {
  239. if (state->digest_result_dma_addr) {
  240. dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
  241. DMA_BIDIRECTIONAL);
  242. dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
  243. state->digest_result_buff,
  244. &state->digest_result_dma_addr, digestsize);
  245. memcpy(result, state->digest_result_buff, digestsize);
  246. }
  247. state->digest_result_dma_addr = 0;
  248. }
  249. static void cc_update_complete(struct device *dev, void *cc_req, int err)
  250. {
  251. struct ahash_request *req = (struct ahash_request *)cc_req;
  252. struct ahash_req_ctx *state = ahash_request_ctx(req);
  253. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  254. struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
  255. dev_dbg(dev, "req=%pK\n", req);
  256. if (err != -EINPROGRESS) {
  257. /* Not a BACKLOG notification */
  258. cc_unmap_hash_request(dev, state, req->src, false);
  259. cc_unmap_req(dev, state, ctx);
  260. }
  261. ahash_request_complete(req, err);
  262. }
  263. static void cc_digest_complete(struct device *dev, void *cc_req, int err)
  264. {
  265. struct ahash_request *req = (struct ahash_request *)cc_req;
  266. struct ahash_req_ctx *state = ahash_request_ctx(req);
  267. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  268. struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
  269. u32 digestsize = crypto_ahash_digestsize(tfm);
  270. dev_dbg(dev, "req=%pK\n", req);
  271. if (err != -EINPROGRESS) {
  272. /* Not a BACKLOG notification */
  273. cc_unmap_hash_request(dev, state, req->src, false);
  274. cc_unmap_result(dev, state, digestsize, req->result);
  275. cc_unmap_req(dev, state, ctx);
  276. }
  277. ahash_request_complete(req, err);
  278. }
  279. static void cc_hash_complete(struct device *dev, void *cc_req, int err)
  280. {
  281. struct ahash_request *req = (struct ahash_request *)cc_req;
  282. struct ahash_req_ctx *state = ahash_request_ctx(req);
  283. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  284. struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
  285. u32 digestsize = crypto_ahash_digestsize(tfm);
  286. dev_dbg(dev, "req=%pK\n", req);
  287. if (err != -EINPROGRESS) {
  288. /* Not a BACKLOG notification */
  289. cc_unmap_hash_request(dev, state, req->src, false);
  290. cc_unmap_result(dev, state, digestsize, req->result);
  291. cc_unmap_req(dev, state, ctx);
  292. }
  293. ahash_request_complete(req, err);
  294. }
/*
 * Append the descriptor that writes the final digest/MAC out of the
 * hash engine into the DMA-mapped result buffer, marking it as the
 * last descriptor of the sequence.
 *
 * Returns the next free index in @desc.
 */
static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
			 int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	/* MD5/SHA-384/SHA-512 need byte-swapped read-out */
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	idx++;

	return idx;
}
/*
 * Append the descriptor sequence for the outer (opad) pass of an HMAC:
 * store the inner-hash result, reload the opad state and length, then
 * hash the inner digest through the outer state. The final read-out is
 * left to cc_fin_result().
 *
 * Returns the next free index in @desc.
 */
static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
		       int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* store the hash digest result in the context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
		      NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Loading hash opad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length (a block's worth, from SRAM) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_sram(&desc[idx],
		     cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
		     ctx->hash_len);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* Perform HASH update: feed the inner digest into the outer hash */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     digestsize, NS_BIT);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	return idx;
}
/*
 * Handle a one-shot ahash .digest request: map the state, source and
 * result, build the full init+update+final descriptor sequence and
 * submit it to the HW queue.
 *
 * Returns -EINPROGRESS/-EBUSY on successful asynchronous submission or
 * a negative error code (with all mappings released) on failure.
 */
static int cc_hash_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 larval_digest_addr;
	int idx = 0;
	int rc = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	cc_init_req(dev, state, ctx);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_digest_complete;
	cc_req.user_arg = req;

	/* If HMAC then load hash IPAD xor key, if HASH then load initial
	 * digest
	 */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
	} else {
		/* plain hash: larval digest comes straight from SRAM */
		larval_digest_addr = cc_larval_digest_addr(ctx->drvdata,
							   ctx->hash_mode);
		set_din_sram(&desc[idx], larval_digest_addr,
			     ctx->inter_digestsize);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);

	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI,
			     state->digest_bytes_len_dma_addr,
			     ctx->hash_len, NS_BIT);
	} else {
		set_din_const(&desc[idx], 0, ctx->hash_len);
		/* zero-length input still needs HW padding of an empty block */
		if (nbytes)
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		else
			set_cipher_do(&desc[idx], DO_PAD);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* stream the source data into the hash engine */
	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	if (is_hmac) {
		/* HW last hash block padding (aka. "DO_PAD") */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      ctx->hash_len, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
		set_cipher_do(&desc[idx], DO_PAD);
		idx++;

		idx = cc_fin_hmac(desc, req, idx);
	}

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}
/*
 * Append the descriptors that reload a previously saved hash state
 * (intermediate digest + running byte length) into the HASH engine and
 * then stream the request's data in.
 *
 * Returns the next free index in @desc.
 */
static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
			   struct ahash_req_ctx *state, unsigned int idx)
{
	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     ctx->hash_len, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* stream the request data into the restored hash */
	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	return idx;
}
/*
 * Handle an ahash .update request: restore the saved hash state, feed
 * in the new data and write the updated state back. Data that does not
 * complete a hash block is only buffered by the buffer manager
 * (cc_map_hash_request_update() returns 1) and no HW request is issued.
 *
 * Returns 0 when nothing had to be sent to HW, -EINPROGRESS/-EBUSY on
 * successful submission, or a negative error code.
 */
static int cc_hash_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
		"hmac" : "hash", nbytes);

	if (nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
					block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, " data size not require HW update %x\n",
				nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		return -EINVAL;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_update_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* store current hash length in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}
/*
 * Common implementation for .finup (@update == true) and .final
 * (@update == false): restore the saved state, hash any remaining
 * buffered/source data, apply HW padding and read out the digest.
 *
 * Returns -EINPROGRESS/-EBUSY on successful submission or a negative
 * error code (with all mappings released) on failure.
 */
static int cc_do_finup(struct ahash_request *req, bool update)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
		update ? "finup" : "final", nbytes);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* Pad the hash (the HW appends the final padding block) */
	hw_desc_init(&desc[idx]);
	set_cipher_do(&desc[idx], DO_PAD);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	idx++;

	if (is_hmac)
		idx = cc_fin_hmac(desc, req, idx);

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}
/* ahash .finup entry point: hash the remaining data, then finalize */
static int cc_hash_finup(struct ahash_request *req)
{
	return cc_do_finup(req, true);
}
/* ahash .final entry point: finalize using only already-buffered data */
static int cc_hash_final(struct ahash_request *req)
{
	return cc_do_finup(req, false);
}
  614. static int cc_hash_init(struct ahash_request *req)
  615. {
  616. struct ahash_req_ctx *state = ahash_request_ctx(req);
  617. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  618. struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
  619. struct device *dev = drvdata_to_dev(ctx->drvdata);
  620. dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
  621. cc_init_req(dev, state, ctx);
  622. return 0;
  623. }
/*
 * cc_hash_setkey() - ahash .setkey entry point for the HMAC transforms.
 * @ahash: the transform
 * @key: the HMAC key
 * @keylen: key length in bytes; zero selects the plain-hash flow, any
 *	    non-zero length enables the HMAC flow
 *
 * Builds a block-sized key image in opad_tmp_keys_buff (hashing the key
 * first when it is longer than the block size, zero-padding when it is
 * shorter or absent), then derives and stores the ipad/opad intermediate
 * digests. Both phases run as synchronous HW requests.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
			  unsigned int keylen)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = NULL;
	int blocksize = 0;
	int digestsize = 0;
	int i, idx = 0, rc = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 larval_addr;
	struct device *dev;

	ctx = crypto_ahash_ctx(ahash);
	dev = drvdata_to_dev(ctx->drvdata);
	dev_dbg(dev, "start keylen: %d", keylen);

	blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	digestsize = crypto_ahash_digestsize(ahash);
	larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);

	/* The keylen value distinguishes HASH in case keylen is ZERO bytes,
	 * any NON-ZERO value utilizes HMAC flow
	 */
	ctx->key_params.keylen = keylen;
	ctx->key_params.key_dma_addr = 0;
	ctx->is_hmac = true;
	ctx->key_params.key = NULL;

	if (keylen) {
		/* Keep a DMA-able copy of the caller's key */
		ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
		if (!ctx->key_params.key)
			return -ENOMEM;

		ctx->key_params.key_dma_addr =
			dma_map_single(dev, ctx->key_params.key, keylen,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
				ctx->key_params.key, keylen);
			kfree_sensitive(ctx->key_params.key);
			return -ENOMEM;
		}
		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

		if (keylen > blocksize) {
			/* Key longer than a block: replace it with its own
			 * hash, then zero-pad up to the block size.
			 */
			/* Load hash initial state */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_sram(&desc[idx], larval_addr,
				     ctx->inter_digestsize);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
			idx++;

			/* Load the hash current length*/
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_const(&desc[idx], 0, ctx->hash_len);
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
			idx++;

			/* Hash the key itself */
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], DIN_HASH);
			idx++;

			/* Get hashed key */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      digestsize, NS_BIT, 0);
			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
			cc_set_endianity(ctx->hash_mode, &desc[idx]);
			idx++;

			/* Zero-pad the hashed key to a full block */
			hw_desc_init(&desc[idx]);
			set_din_const(&desc[idx], 0, (blocksize - digestsize));
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx],
				      (ctx->opad_tmp_keys_dma_addr +
				       digestsize),
				      (blocksize - digestsize), NS_BIT, 0);
			idx++;
		} else {
			/* Key fits in a block: copy it through BYPASS and
			 * zero-pad the remainder, if any.
			 */
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      keylen, NS_BIT, 0);
			idx++;

			if ((blocksize - keylen)) {
				hw_desc_init(&desc[idx]);
				set_din_const(&desc[idx], 0,
					      (blocksize - keylen));
				set_flow_mode(&desc[idx], BYPASS);
				set_dout_dlli(&desc[idx],
					      (ctx->opad_tmp_keys_dma_addr +
					       keylen), (blocksize - keylen),
					      NS_BIT, 0);
				idx++;
			}
		}
	} else {
		/* Zero-length key: use an all-zero block as the key image */
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0, blocksize);
		set_flow_mode(&desc[idx], BYPASS);
		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
			      blocksize, NS_BIT, 0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
	if (rc) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		goto out;
	}

	/* calc derived HMAC key */
	for (idx = 0, i = 0; i < 2; i++) {
		/* Load hash initial state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length*/
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_const(&desc[idx], 0, ctx->hash_len);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Prepare ipad key */
		hw_desc_init(&desc[idx]);
		set_xor_val(&desc[idx], hmac_pad_const[i]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
			     blocksize, NS_BIT);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_xor_active(&desc[idx]);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;

		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
		 * of the first HASH "update" state)
		 */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		if (i > 0) /* Not first iteration */
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		else /* First iteration */
			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

out:
	/* Always undo the key mapping and scrub the key copy */
	if (ctx->key_params.key_dma_addr) {
		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
				 ctx->key_params.keylen, DMA_TO_DEVICE);
		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
	}

	kfree_sensitive(ctx->key_params.key);

	return rc;
}
/*
 * cc_xcbc_setkey() - ahash .setkey entry point for xcbc(aes).
 *
 * Derives the three XCBC subkeys K1/K2/K3 by ECB-encrypting the constant
 * blocks 0x01..01, 0x02..02 and 0x03..03 under the user key, storing them
 * at their offsets in opad_tmp_keys_buff via a synchronous HW request.
 *
 * Return: 0 on success, -EINVAL for an unsupported key size, negative
 * errno otherwise.
 */
static int cc_xcbc_setkey(struct crypto_ahash *ahash,
			  const u8 *key, unsigned int keylen)
{
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	int rc = 0;
	unsigned int idx = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	/* Keep a DMA-able copy of the caller's key */
	ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
	if (!ctx->key_params.key)
		return -ENOMEM;

	ctx->key_params.key_dma_addr =
		dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
			key, keylen);
		kfree_sensitive(ctx->key_params.key);
		return -ENOMEM;
	}
	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	ctx->is_hmac = true;
	/* 1. Load the AES key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
		     keylen, NS_BIT);
	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_key_size_aes(&desc[idx], keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* 2. Encrypt constant 0x01..01 -> K1 */
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	/* 3. Encrypt constant 0x02..02 -> K2 */
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	/* 4. Encrypt constant 0x03..03 -> K3 */
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
			 ctx->key_params.keylen, DMA_TO_DEVICE);
	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	kfree_sensitive(ctx->key_params.key);

	return rc;
}
  868. static int cc_cmac_setkey(struct crypto_ahash *ahash,
  869. const u8 *key, unsigned int keylen)
  870. {
  871. struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  872. struct device *dev = drvdata_to_dev(ctx->drvdata);
  873. dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
  874. ctx->is_hmac = true;
  875. switch (keylen) {
  876. case AES_KEYSIZE_128:
  877. case AES_KEYSIZE_192:
  878. case AES_KEYSIZE_256:
  879. break;
  880. default:
  881. return -EINVAL;
  882. }
  883. ctx->key_params.keylen = keylen;
  884. /* STAT_PHASE_1: Copy key to ctx */
  885. dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
  886. keylen, DMA_TO_DEVICE);
  887. memcpy(ctx->opad_tmp_keys_buff, key, keylen);
  888. if (keylen == 24) {
  889. memset(ctx->opad_tmp_keys_buff + 24, 0,
  890. CC_AES_KEY_SIZE_MAX - 24);
  891. }
  892. dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
  893. keylen, DMA_TO_DEVICE);
  894. ctx->key_params.keylen = keylen;
  895. return 0;
  896. }
/*
 * cc_free_ctx() - Release the DMA mappings of the transform context's
 * scratch buffers and clear the stored key length.
 *
 * Safe to call on a partially initialized context: each mapping is only
 * undone when its DMA address is non-zero, and is zeroed afterwards so a
 * repeated call is a no-op.
 */
static void cc_free_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (ctx->digest_buff_dma_addr) {
		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&ctx->digest_buff_dma_addr);
		ctx->digest_buff_dma_addr = 0;
	}
	if (ctx->opad_tmp_keys_dma_addr) {
		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
				 sizeof(ctx->opad_tmp_keys_buff),
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
			&ctx->opad_tmp_keys_dma_addr);
		ctx->opad_tmp_keys_dma_addr = 0;
	}

	ctx->key_params.keylen = 0;
}
/*
 * cc_alloc_ctx() - Map the transform context's digest and opad/key
 * scratch buffers for bidirectional DMA.
 *
 * Return: 0 on success, -ENOMEM on mapping failure; any mapping taken
 * before the failure is released via cc_free_ctx().
 */
static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	ctx->key_params.keylen = 0;

	ctx->digest_buff_dma_addr =
		dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->digest_buff), ctx->digest_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->digest_buff), ctx->digest_buff,
		&ctx->digest_buff_dma_addr);

	ctx->opad_tmp_keys_dma_addr =
		dma_map_single(dev, ctx->opad_tmp_keys_buff,
			       sizeof(ctx->opad_tmp_keys_buff),
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->opad_tmp_keys_buff),
			ctx->opad_tmp_keys_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
		&ctx->opad_tmp_keys_dma_addr);

	ctx->is_hmac = false;
	return 0;

fail:
	cc_free_ctx(ctx);
	return -ENOMEM;
}
  951. static int cc_get_hash_len(struct crypto_tfm *tfm)
  952. {
  953. struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
  954. if (ctx->hash_mode == DRV_HASH_SM3)
  955. return CC_SM3_HASH_LEN_SIZE;
  956. else
  957. return cc_get_default_hash_len(ctx->drvdata);
  958. }
/*
 * cc_cra_init() - crypto transform constructor.
 *
 * Recovers the registering cc_hash_alg from the generic tfm via the
 * container_of() chain, copies its algorithm parameters into the context
 * and maps the context scratch buffers for DMA.
 *
 * Return: 0 on success, -ENOMEM if the DMA mappings fail.
 */
static int cc_cra_init(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct hash_alg_common *hash_alg_common =
		container_of(tfm->__crt_alg, struct hash_alg_common, base);
	struct ahash_alg *ahash_alg =
		container_of(hash_alg_common, struct ahash_alg, halg);
	struct cc_hash_alg *cc_alg =
		container_of(ahash_alg, struct cc_hash_alg, ahash_alg);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_req_ctx));

	ctx->hash_mode = cc_alg->hash_mode;
	ctx->hw_mode = cc_alg->hw_mode;
	ctx->inter_digestsize = cc_alg->inter_digestsize;
	ctx->drvdata = cc_alg->drvdata;
	ctx->hash_len = cc_get_hash_len(tfm);
	return cc_alloc_ctx(ctx);
}
  977. static void cc_cra_exit(struct crypto_tfm *tfm)
  978. {
  979. struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
  980. struct device *dev = drvdata_to_dev(ctx->drvdata);
  981. dev_dbg(dev, "cc_cra_exit");
  982. cc_free_ctx(ctx);
  983. }
  984. static int cc_mac_update(struct ahash_request *req)
  985. {
  986. struct ahash_req_ctx *state = ahash_request_ctx(req);
  987. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  988. struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
  989. struct device *dev = drvdata_to_dev(ctx->drvdata);
  990. unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
  991. struct cc_crypto_req cc_req = {};
  992. struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
  993. int rc;
  994. u32 idx = 0;
  995. gfp_t flags = cc_gfp_flags(&req->base);
  996. if (req->nbytes == 0) {
  997. /* no real updates required */
  998. return 0;
  999. }
  1000. state->xcbc_count++;
  1001. rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
  1002. req->nbytes, block_size, flags);
  1003. if (rc) {
  1004. if (rc == 1) {
  1005. dev_dbg(dev, " data size not require HW update %x\n",
  1006. req->nbytes);
  1007. /* No hardware updates are required */
  1008. return 0;
  1009. }
  1010. dev_err(dev, "map_ahash_request_update() failed\n");
  1011. return -ENOMEM;
  1012. }
  1013. if (cc_map_req(dev, state, ctx)) {
  1014. dev_err(dev, "map_ahash_source() failed\n");
  1015. return -EINVAL;
  1016. }
  1017. if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
  1018. cc_setup_xcbc(req, desc, &idx);
  1019. else
  1020. cc_setup_cmac(req, desc, &idx);
  1021. cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
  1022. /* store the hash digest result in context */
  1023. hw_desc_init(&desc[idx]);
  1024. set_cipher_mode(&desc[idx], ctx->hw_mode);
  1025. set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
  1026. ctx->inter_digestsize, NS_BIT, 1);
  1027. set_queue_last_ind(ctx->drvdata, &desc[idx]);
  1028. set_flow_mode(&desc[idx], S_AES_to_DOUT);
  1029. set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
  1030. idx++;
  1031. /* Setup request structure */
  1032. cc_req.user_cb = cc_update_complete;
  1033. cc_req.user_arg = req;
  1034. rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
  1035. if (rc != -EINPROGRESS && rc != -EBUSY) {
  1036. dev_err(dev, "send_request() failed (rc=%d)\n", rc);
  1037. cc_unmap_hash_request(dev, state, req->src, true);
  1038. cc_unmap_req(dev, state, ctx);
  1039. }
  1040. return rc;
  1041. }
/*
 * cc_mac_final() - ahash .final entry point for the AES-MAC (XCBC/CMAC)
 * transforms.
 *
 * Three cases are handled: no data was ever processed (zero-size MAC),
 * a partial block remains buffered, or the data ended exactly on a block
 * boundary. In the last case the intermediate state already absorbed the
 * final block during update, so it is first ECB-decrypted with the key at
 * XCBC_MAC_K1_OFFSET to recover the previous block_state-XOR-M[n] value
 * before the proper final MAC round is run.
 *
 * Return: -EINPROGRESS/-EBUSY when queued, negative errno otherwise.
 */
static int cc_mac_final(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_size, key_len;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);
	u32 rem_cnt = *cc_hash_buf_cnt(state);

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_size = CC_AES_128_BIT_KEY_SIZE;
		key_len = CC_AES_128_BIT_KEY_SIZE;
	} else {
		/* 192-bit CMAC keys were zero-extended at setkey time, so
		 * the HW key slot is loaded with the maximum size.
		 */
		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
			ctx->key_params.keylen;
		key_len = ctx->key_params.keylen;
	}

	dev_dbg(dev, "===== final xcbc reminder (%d) ====\n", rem_cnt);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 0, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	if (state->xcbc_count && rem_cnt == 0) {
		/* Load key for ECB decryption */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
		set_din_type(&desc[idx], DMA_DLLI,
			     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
			     key_size, NS_BIT);
		set_key_size_aes(&desc[idx], key_len);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Initiate decryption of block state to previous
		 * block_state-XOR-M[n]
		 */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     CC_AES_BLOCK_SIZE, NS_BIT);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;

		/* Memory Barrier: wait for axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;
	}

	/* Program the MAC engine for the final round */
	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		cc_setup_xcbc(req, desc, &idx);
	else
		cc_setup_cmac(req, desc, &idx);

	if (state->xcbc_count == 0) {
		/* Zero-size message: use the HW size-0 CMAC mode */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else if (rem_cnt > 0) {
		/* Feed the buffered partial block */
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	} else {
		/* Block-aligned data: re-MAC the recovered state with a
		 * zero block
		 */
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}
/*
 * cc_mac_finup() - ahash .finup entry point for the AES-MAC (XCBC/CMAC)
 * transforms.
 *
 * When data was already processed and nothing new is supplied, delegates
 * to cc_mac_final(). Otherwise feeds the remaining data (or the HW
 * zero-size mode for an entirely empty message) and retrieves the final
 * MAC. Completion is reported asynchronously via cc_hash_complete().
 *
 * Return: -EINPROGRESS/-EBUSY when queued, negative errno otherwise.
 */
static int cc_mac_finup(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_len = 0;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
	if (state->xcbc_count > 0 && req->nbytes == 0) {
		dev_dbg(dev, "No data to update. Call to fdx_mac_final\n");
		return cc_mac_final(req);
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		cc_setup_xcbc(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		cc_setup_cmac(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		/* Empty message: use the HW size-0 CMAC mode */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}
/*
 * cc_mac_digest() - ahash .digest entry point for the AES-MAC (XCBC/CMAC)
 * transforms: init + update + final in one shot.
 *
 * Completion is reported asynchronously via cc_digest_complete().
 *
 * Return: -EINPROGRESS/-EBUSY when queued, negative errno otherwise.
 */
static int cc_mac_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 key_len;
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes);

	cc_init_req(dev, state, ctx);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_digest_complete;
	cc_req.user_arg = req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		cc_setup_xcbc(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		cc_setup_cmac(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		/* Empty message: use the HW size-0 CMAC mode */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}
  1290. static int cc_hash_export(struct ahash_request *req, void *out)
  1291. {
  1292. struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
  1293. struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  1294. struct ahash_req_ctx *state = ahash_request_ctx(req);
  1295. u8 *curr_buff = cc_hash_buf(state);
  1296. u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
  1297. const u32 tmp = CC_EXPORT_MAGIC;
  1298. memcpy(out, &tmp, sizeof(u32));
  1299. out += sizeof(u32);
  1300. memcpy(out, state->digest_buff, ctx->inter_digestsize);
  1301. out += ctx->inter_digestsize;
  1302. memcpy(out, state->digest_bytes_len, ctx->hash_len);
  1303. out += ctx->hash_len;
  1304. memcpy(out, &curr_buff_cnt, sizeof(u32));
  1305. out += sizeof(u32);
  1306. memcpy(out, curr_buff, curr_buff_cnt);
  1307. return 0;
  1308. }
/*
 * cc_hash_import() - ahash .import entry point; restores state produced
 * by cc_hash_export().
 *
 * Validates the leading magic word and bounds-checks the buffered-byte
 * count before copying anything into the request buffer.
 *
 * Return: 0 on success, -EINVAL on malformed input.
 */
static int cc_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	u32 tmp;

	memcpy(&tmp, in, sizeof(u32));
	if (tmp != CC_EXPORT_MAGIC)
		return -EINVAL;
	in += sizeof(u32);

	cc_init_req(dev, state, ctx);

	memcpy(state->digest_buff, in, ctx->inter_digestsize);
	in += ctx->inter_digestsize;

	memcpy(state->digest_bytes_len, in, ctx->hash_len);
	in += ctx->hash_len;

	/* Sanity check the data as much as possible */
	memcpy(&tmp, in, sizeof(u32));
	if (tmp > CC_MAX_HASH_BLCK_SIZE)
		return -EINVAL;
	in += sizeof(u32);

	state->buf_cnt[0] = tmp;
	memcpy(state->buffers[0], in, tmp);

	return 0;
}
/*
 * struct cc_hash_template - static template from which the driver's
 * hash and HMAC/MAC algorithm registrations are built.
 */
struct cc_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];		/* plain hash algorithm name */
	char driver_name[CRYPTO_MAX_ALG_NAME];	/* plain hash driver name */
	char mac_name[CRYPTO_MAX_ALG_NAME];	/* HMAC/MAC variant name */
	char mac_driver_name[CRYPTO_MAX_ALG_NAME]; /* HMAC/MAC driver name */
	unsigned int blocksize;		/* algorithm block size in bytes */
	bool is_mac;			/* MAC variant names are valid */
	bool synchronize;
	struct ahash_alg template_ahash; /* ops + halg used for registration */
	int hash_mode;			/* DRV_HASH_* software mode */
	int hw_mode;			/* DRV_HASH_HW_* / cipher HW mode */
	int inter_digestsize;		/* intermediate digest size in bytes */
	struct cc_drvdata *drvdata;	/* driver instance association */
	u32 min_hw_rev;			/* minimal HW revision for this alg */
	enum cc_std_body std_body;	/* standards body gating (e.g. NIST) */
};
/*
 * Exported-state size for a hash with intermediate digest size _x:
 * digest + max length counter + one block of buffered data + two u32s
 * (export magic and buffered-byte count). Must match the layout used by
 * cc_hash_export()/cc_hash_import().
 */
#define CC_STATE_SIZE(_x) \
	((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
  1352. /* hash descriptors */
static struct cc_hash_template driver_hash[] = {
	/* Asynchronous hash templates */
	{
		.name = "sha1",
		.driver_name = "sha1-ccree",
		.mac_name = "hmac(sha1)",
		.mac_driver_name = "hmac-sha1-ccree",
		.blocksize = SHA1_BLOCK_SIZE,
		.is_mac = true,
		.synchronize = false,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA1,
		.hw_mode = DRV_HASH_HW_SHA1,
		.inter_digestsize = SHA1_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha256",
		.driver_name = "sha256-ccree",
		.mac_name = "hmac(sha256)",
		.mac_driver_name = "hmac-sha256-ccree",
		.blocksize = SHA256_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
			},
		},
		.hash_mode = DRV_HASH_SHA256,
		.hw_mode = DRV_HASH_HW_SHA256,
		.inter_digestsize = SHA256_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha224",
		.driver_name = "sha224-ccree",
		.mac_name = "hmac(sha224)",
		.mac_driver_name = "hmac-sha224-ccree",
		.blocksize = SHA224_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				/*
				 * SHA-224 is truncated SHA-256: the HW runs
				 * the full SHA-256 engine, so the exported
				 * state is SHA-256 sized.
				 */
				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA224,
		.hw_mode = DRV_HASH_HW_SHA256,
		.inter_digestsize = SHA256_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha384",
		.driver_name = "sha384-ccree",
		.mac_name = "hmac(sha384)",
		.mac_driver_name = "hmac-sha384-ccree",
		.blocksize = SHA384_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				/* Truncated SHA-512: state is SHA-512 sized */
				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA384,
		.hw_mode = DRV_HASH_HW_SHA512,
		.inter_digestsize = SHA512_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha512",
		.driver_name = "sha512-ccree",
		.mac_name = "hmac(sha512)",
		.mac_driver_name = "hmac-sha512-ccree",
		.blocksize = SHA512_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA512,
		.hw_mode = DRV_HASH_HW_SHA512,
		.inter_digestsize = SHA512_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "md5",
		.driver_name = "md5-ccree",
		.mac_name = "hmac(md5)",
		.mac_driver_name = "hmac-md5-ccree",
		.blocksize = MD5_HMAC_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_MD5,
		.hw_mode = DRV_HASH_HW_MD5,
		.inter_digestsize = MD5_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		/* SM3 (OSCCA): hash only, no HMAC variant registered */
		.name = "sm3",
		.driver_name = "sm3-ccree",
		.blocksize = SM3_BLOCK_SIZE,
		.is_mac = false,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SM3_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SM3,
		.hw_mode = DRV_HASH_HW_SM3,
		.inter_digestsize = SM3_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_713,
		.std_body = CC_STD_OSCCA,
	},
	{
		/* AES-based MAC: registered only in keyed (mac_name) form */
		.mac_name = "xcbc(aes)",
		.mac_driver_name = "xcbc-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_mac_update,
			.final = cc_mac_final,
			.finup = cc_mac_finup,
			.digest = cc_mac_digest,
			.setkey = cc_xcbc_setkey,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
			},
		},
		.hash_mode = DRV_HASH_NULL,
		.hw_mode = DRV_CIPHER_XCBC_MAC,
		.inter_digestsize = AES_BLOCK_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.mac_name = "cmac(aes)",
		.mac_driver_name = "cmac-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_mac_update,
			.final = cc_mac_final,
			.finup = cc_mac_finup,
			.digest = cc_mac_digest,
			.setkey = cc_cmac_setkey,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
			},
		},
		.hash_mode = DRV_HASH_NULL,
		.hw_mode = DRV_CIPHER_CMAC,
		.inter_digestsize = AES_BLOCK_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
};
  1594. static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
  1595. struct device *dev, bool keyed)
  1596. {
  1597. struct cc_hash_alg *t_crypto_alg;
  1598. struct crypto_alg *alg;
  1599. struct ahash_alg *halg;
  1600. t_crypto_alg = devm_kzalloc(dev, sizeof(*t_crypto_alg), GFP_KERNEL);
  1601. if (!t_crypto_alg)
  1602. return ERR_PTR(-ENOMEM);
  1603. t_crypto_alg->ahash_alg = template->template_ahash;
  1604. halg = &t_crypto_alg->ahash_alg;
  1605. alg = &halg->halg.base;
  1606. if (keyed) {
  1607. snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
  1608. template->mac_name);
  1609. snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
  1610. template->mac_driver_name);
  1611. } else {
  1612. halg->setkey = NULL;
  1613. snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
  1614. template->name);
  1615. snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
  1616. template->driver_name);
  1617. }
  1618. alg->cra_module = THIS_MODULE;
  1619. alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
  1620. alg->cra_priority = CC_CRA_PRIO;
  1621. alg->cra_blocksize = template->blocksize;
  1622. alg->cra_alignmask = 0;
  1623. alg->cra_exit = cc_cra_exit;
  1624. alg->cra_init = cc_cra_init;
  1625. alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
  1626. t_crypto_alg->hash_mode = template->hash_mode;
  1627. t_crypto_alg->hw_mode = template->hw_mode;
  1628. t_crypto_alg->inter_digestsize = template->inter_digestsize;
  1629. return t_crypto_alg;
  1630. }
  1631. static int cc_init_copy_sram(struct cc_drvdata *drvdata, const u32 *data,
  1632. unsigned int size, u32 *sram_buff_ofs)
  1633. {
  1634. struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
  1635. unsigned int larval_seq_len = 0;
  1636. int rc;
  1637. cc_set_sram_desc(data, *sram_buff_ofs, size / sizeof(*data),
  1638. larval_seq, &larval_seq_len);
  1639. rc = send_request_init(drvdata, larval_seq, larval_seq_len);
  1640. if (rc)
  1641. return rc;
  1642. *sram_buff_ofs += size;
  1643. return 0;
  1644. }
  1645. int cc_init_hash_sram(struct cc_drvdata *drvdata)
  1646. {
  1647. struct cc_hash_handle *hash_handle = drvdata->hash_handle;
  1648. u32 sram_buff_ofs = hash_handle->digest_len_sram_addr;
  1649. bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
  1650. bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
  1651. int rc = 0;
  1652. /* Copy-to-sram digest-len */
  1653. rc = cc_init_copy_sram(drvdata, cc_digest_len_init,
  1654. sizeof(cc_digest_len_init), &sram_buff_ofs);
  1655. if (rc)
  1656. goto init_digest_const_err;
  1657. if (large_sha_supported) {
  1658. /* Copy-to-sram digest-len for sha384/512 */
  1659. rc = cc_init_copy_sram(drvdata, cc_digest_len_sha512_init,
  1660. sizeof(cc_digest_len_sha512_init),
  1661. &sram_buff_ofs);
  1662. if (rc)
  1663. goto init_digest_const_err;
  1664. }
  1665. /* The initial digests offset */
  1666. hash_handle->larval_digest_sram_addr = sram_buff_ofs;
  1667. /* Copy-to-sram initial SHA* digests */
  1668. rc = cc_init_copy_sram(drvdata, cc_md5_init, sizeof(cc_md5_init),
  1669. &sram_buff_ofs);
  1670. if (rc)
  1671. goto init_digest_const_err;
  1672. rc = cc_init_copy_sram(drvdata, cc_sha1_init, sizeof(cc_sha1_init),
  1673. &sram_buff_ofs);
  1674. if (rc)
  1675. goto init_digest_const_err;
  1676. rc = cc_init_copy_sram(drvdata, cc_sha224_init, sizeof(cc_sha224_init),
  1677. &sram_buff_ofs);
  1678. if (rc)
  1679. goto init_digest_const_err;
  1680. rc = cc_init_copy_sram(drvdata, cc_sha256_init, sizeof(cc_sha256_init),
  1681. &sram_buff_ofs);
  1682. if (rc)
  1683. goto init_digest_const_err;
  1684. if (sm3_supported) {
  1685. rc = cc_init_copy_sram(drvdata, cc_sm3_init,
  1686. sizeof(cc_sm3_init), &sram_buff_ofs);
  1687. if (rc)
  1688. goto init_digest_const_err;
  1689. }
  1690. if (large_sha_supported) {
  1691. rc = cc_init_copy_sram(drvdata, cc_sha384_init,
  1692. sizeof(cc_sha384_init), &sram_buff_ofs);
  1693. if (rc)
  1694. goto init_digest_const_err;
  1695. rc = cc_init_copy_sram(drvdata, cc_sha512_init,
  1696. sizeof(cc_sha512_init), &sram_buff_ofs);
  1697. if (rc)
  1698. goto init_digest_const_err;
  1699. }
  1700. init_digest_const_err:
  1701. return rc;
  1702. }
/*
 * cc_hash_alloc() - Allocate hash-subsystem state and register algorithms.
 *
 * Allocates the hash handle, reserves SRAM sized for exactly the constant
 * tables this HW revision needs, loads those constants, then registers
 * every driver_hash[] entry supported by the HW revision and standards
 * body: the keyed (HMAC/MAC) flavour first, then the plain hash flavour
 * (skipped for XCBC/CMAC, which exist only in keyed form).
 *
 * Return: 0 on success; on any failure, unregisters everything registered
 * so far via cc_hash_free() and returns a negative error code.
 */
int cc_hash_alloc(struct cc_drvdata *drvdata)
{
	struct cc_hash_handle *hash_handle;
	u32 sram_buff;
	u32 sram_size_to_alloc;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;
	int alg;

	hash_handle = devm_kzalloc(dev, sizeof(*hash_handle), GFP_KERNEL);
	if (!hash_handle)
		return -ENOMEM;

	INIT_LIST_HEAD(&hash_handle->hash_list);
	drvdata->hash_handle = hash_handle;

	/*
	 * Size the SRAM region to match what cc_init_hash_sram() will copy:
	 * the baseline constants plus the optional SM3 / SHA384+SHA512 sets.
	 */
	sram_size_to_alloc = sizeof(cc_digest_len_init) +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init) +
			sizeof(cc_sha256_init);

	if (drvdata->hw_rev >= CC_HW_REV_713)
		sram_size_to_alloc += sizeof(cc_sm3_init);

	if (drvdata->hw_rev >= CC_HW_REV_712)
		sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) +
			sizeof(cc_sha384_init) + sizeof(cc_sha512_init);

	sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
	if (sram_buff == NULL_SRAM_ADDR) {
		rc = -ENOMEM;
		goto fail;
	}

	/* The initial digest-len offset */
	hash_handle->digest_len_sram_addr = sram_buff;

	/* Must be set before the alg registration as it is being used there */
	rc = cc_init_hash_sram(drvdata);
	if (rc) {
		dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
		goto fail;
	}

	/* ahash registration */
	for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
		struct cc_hash_alg *t_alg;
		int hw_mode = driver_hash[alg].hw_mode;

		/* Check that the HW revision and variants are suitable */
		if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
		    !(drvdata->std_bodies & driver_hash[alg].std_body))
			continue;

		if (driver_hash[alg].is_mac) {
			/* register hmac version */
			t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
			if (IS_ERR(t_alg)) {
				rc = PTR_ERR(t_alg);
				dev_err(dev, "%s alg allocation failed\n",
					driver_hash[alg].driver_name);
				goto fail;
			}
			t_alg->drvdata = drvdata;

			rc = crypto_register_ahash(&t_alg->ahash_alg);
			if (rc) {
				dev_err(dev, "%s alg registration failed\n",
					driver_hash[alg].driver_name);
				goto fail;
			}

			/* Track for unregistration in cc_hash_free() */
			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
		}

		/* XCBC/CMAC have no unkeyed flavour */
		if (hw_mode == DRV_CIPHER_XCBC_MAC ||
		    hw_mode == DRV_CIPHER_CMAC)
			continue;

		/* register hash version */
		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				driver_hash[alg].driver_name);
			goto fail;
		}
		t_alg->drvdata = drvdata;

		rc = crypto_register_ahash(&t_alg->ahash_alg);
		if (rc) {
			dev_err(dev, "%s alg registration failed\n",
				driver_hash[alg].driver_name);
			goto fail;
		}

		list_add_tail(&t_alg->entry, &hash_handle->hash_list);
	}

	return 0;

fail:
	cc_hash_free(drvdata);
	return rc;
}
  1790. int cc_hash_free(struct cc_drvdata *drvdata)
  1791. {
  1792. struct cc_hash_alg *t_hash_alg, *hash_n;
  1793. struct cc_hash_handle *hash_handle = drvdata->hash_handle;
  1794. list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list,
  1795. entry) {
  1796. crypto_unregister_ahash(&t_hash_alg->ahash_alg);
  1797. list_del(&t_hash_alg->entry);
  1798. }
  1799. return 0;
  1800. }
/*
 * cc_setup_xcbc() - Append the XCBC-MAC key/state setup descriptors.
 *
 * Emits four HW descriptors: load derived keys K1, K2, K3 from the
 * context's opad_tmp_keys buffer, then load the running MAC state from
 * the request's digest buffer. Descriptor order matters to the HW setup
 * sequence. *seq_size is advanced by the number of descriptors added.
 */
static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct ahash_req_ctx *state = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Setup XCBC MAC K1 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
					    XCBC_MAC_K1_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Setup XCBC MAC K2 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Setup XCBC MAC K3 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Loading MAC state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	*seq_size = idx;
}
/*
 * cc_setup_cmac() - Append the CMAC key/state setup descriptors.
 *
 * Emits two HW descriptors: load the AES key from opad_tmp_keys, then
 * load the running MAC state from the request's digest buffer.
 * *seq_size is advanced by the number of descriptors added.
 */
static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct ahash_req_ctx *state = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Setup CMAC Key */
	hw_desc_init(&desc[idx]);
	/*
	 * NOTE(review): a 24-byte (192-bit) key is loaded with
	 * AES_MAX_KEY_SIZE as the DMA length — presumably the setkey path
	 * stores it padded to the max key size; confirm against setkey.
	 */
	set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
		     ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
		      ctx->key_params.keylen), NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Load MAC state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	*seq_size = idx;
}
  1883. static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
  1884. struct cc_hash_ctx *ctx, unsigned int flow_mode,
  1885. struct cc_hw_desc desc[], bool is_not_last_data,
  1886. unsigned int *seq_size)
  1887. {
  1888. unsigned int idx = *seq_size;
  1889. struct device *dev = drvdata_to_dev(ctx->drvdata);
  1890. if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
  1891. hw_desc_init(&desc[idx]);
  1892. set_din_type(&desc[idx], DMA_DLLI,
  1893. sg_dma_address(areq_ctx->curr_sg),
  1894. areq_ctx->curr_sg->length, NS_BIT);
  1895. set_flow_mode(&desc[idx], flow_mode);
  1896. idx++;
  1897. } else {
  1898. if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
  1899. dev_dbg(dev, " NULL mode\n");
  1900. /* nothing to build */
  1901. return;
  1902. }
  1903. /* bypass */
  1904. hw_desc_init(&desc[idx]);
  1905. set_din_type(&desc[idx], DMA_DLLI,
  1906. areq_ctx->mlli_params.mlli_dma_addr,
  1907. areq_ctx->mlli_params.mlli_len, NS_BIT);
  1908. set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
  1909. areq_ctx->mlli_params.mlli_len);
  1910. set_flow_mode(&desc[idx], BYPASS);
  1911. idx++;
  1912. /* process */
  1913. hw_desc_init(&desc[idx]);
  1914. set_din_type(&desc[idx], DMA_MLLI,
  1915. ctx->drvdata->mlli_sram_addr,
  1916. areq_ctx->mlli_nents, NS_BIT);
  1917. set_flow_mode(&desc[idx], flow_mode);
  1918. idx++;
  1919. }
  1920. if (is_not_last_data)
  1921. set_din_not_last_indication(&desc[(idx - 1)]);
  1922. /* return updated desc sequence size */
  1923. *seq_size = idx;
  1924. }
/*
 * cc_larval_digest() - Return the kernel-memory initial (larval) digest
 * constants for the given hash mode.
 *
 * On an unknown mode, logs an error and falls back to the MD5 constants
 * so the caller never receives a NULL pointer.
 */
static const void *cc_larval_digest(struct device *dev, u32 mode)
{
	switch (mode) {
	case DRV_HASH_MD5:
		return cc_md5_init;
	case DRV_HASH_SHA1:
		return cc_sha1_init;
	case DRV_HASH_SHA224:
		return cc_sha224_init;
	case DRV_HASH_SHA256:
		return cc_sha256_init;
	case DRV_HASH_SHA384:
		return cc_sha384_init;
	case DRV_HASH_SHA512:
		return cc_sha512_init;
	case DRV_HASH_SM3:
		return cc_sm3_init;
	default:
		dev_err(dev, "Invalid hash mode (%d)\n", mode);
		return cc_md5_init;
	}
}
/**
 * cc_larval_digest_addr() - Get the address of the initial digest in SRAM
 * according to the given hash mode
 *
 * @drvdata: Associated device driver context
 * @mode: The Hash mode. Supported modes:
 *        MD5/SHA1/SHA224/SHA256/SM3/SHA384/SHA512
 *
 * The offsets mirror the copy order in cc_init_hash_sram(): MD5, SHA1,
 * SHA224, SHA256, then SM3 (only on HW >= 713) and SHA384/SHA512.
 *
 * Return:
 * The address of the initial digest in SRAM
 */
u32 cc_larval_digest_addr(void *drvdata, u32 mode)
{
	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
	struct device *dev = drvdata_to_dev(_drvdata);
	bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
	u32 addr;

	switch (mode) {
	case DRV_HASH_NULL:
		break; /*Ignore*/
	case DRV_HASH_MD5:
		return (hash_handle->larval_digest_sram_addr);
	case DRV_HASH_SHA1:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init));
	case DRV_HASH_SHA224:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init));
	case DRV_HASH_SHA256:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init));
	case DRV_HASH_SM3:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init) +
			sizeof(cc_sha256_init));
	case DRV_HASH_SHA384:
		/* SM3 constants sit before SHA384/512 when present */
		addr = (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init) +
			sizeof(cc_sha256_init));
		if (sm3_supported)
			addr += sizeof(cc_sm3_init);
		return addr;
	case DRV_HASH_SHA512:
		addr = (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init) +
			sizeof(cc_sha256_init) +
			sizeof(cc_sha384_init));
		if (sm3_supported)
			addr += sizeof(cc_sm3_init);
		return addr;
	default:
		dev_err(dev, "Invalid hash mode (%d)\n", mode);
	}

	/*This is valid wrong value to avoid kernel crash*/
	return hash_handle->larval_digest_sram_addr;
}
  2012. u32 cc_digest_len_addr(void *drvdata, u32 mode)
  2013. {
  2014. struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
  2015. struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
  2016. u32 digest_len_addr = hash_handle->digest_len_sram_addr;
  2017. switch (mode) {
  2018. case DRV_HASH_SHA1:
  2019. case DRV_HASH_SHA224:
  2020. case DRV_HASH_SHA256:
  2021. case DRV_HASH_MD5:
  2022. return digest_len_addr;
  2023. case DRV_HASH_SHA384:
  2024. case DRV_HASH_SHA512:
  2025. return digest_len_addr + sizeof(cc_digest_len_init);
  2026. default:
  2027. return digest_len_addr; /*to avoid kernel crash*/
  2028. }
  2029. }