safexcel.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <[email protected]>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv)
{
	int i;

	/*
	 * Map all interfaces/rings to register index 0
	 * so they can share contexts. Without this, the EIP197 will
	 * assume each interface/ring to be in its own memory domain
	 * i.e. have its own subset of UNIQUE memory addresses.
	 * Which would cause records with the SAME memory address to
	 * use DIFFERENT cache buffers, causing both poor cache utilization
	 * AND serious coherence/invalidation issues.
	 */
	for (i = 0; i < 4; i++)
		writel(0, priv->base + EIP197_FLUE_IFC_LUT(i));

	/*
	 * Initialize other virtualization regs for cache
	 * These may not be in their reset state ...
	 */
	for (i = 0; i < priv->config.rings; i++) {
		writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i));
		writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i));
		writel(EIP197_FLUE_CONFIG_MAGIC,
		       priv->base + EIP197_FLUE_CONFIG(i));
	}
	writel(0, priv->base + EIP197_FLUE_OFFSETS);
	writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET);
}

static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
				     u32 addrmid, int *actbank)
{
	u32 val;
	int curbank;

	curbank = addrmid >> 16;
	if (curbank != *actbank) {
		val = readl(priv->base + EIP197_CS_RAM_CTRL);
		val = (val & ~EIP197_CS_BANKSEL_MASK) |
		      (curbank << EIP197_CS_BANKSEL_OFS);
		writel(val, priv->base + EIP197_CS_RAM_CTRL);
		*actbank = curbank;
	}
}
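/*
 * Note on the bank select above: the probe below masks RAM addresses
 * with 0xffff, so each bank spans a 64 KiB window and addrmid >> 16 is
 * the bank number. The bank select field in CS_RAM_CTRL is therefore
 * only rewritten when a probe access crosses a 64 KiB bank boundary.
 */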
static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
				  int maxbanks, u32 probemask, u32 stride)
{
	u32 val, addrhi, addrlo, addrmid, addralias, delta, marker;
	int actbank;

	/*
	 * And probe the actual size of the physically attached cache data RAM
	 * using a binary subdivision algorithm down to 32 byte cache lines.
	 */
	addrhi = 1 << (16 + maxbanks);
	addrlo = 0;
	actbank = min(maxbanks - 1, 0);
	while ((addrhi - addrlo) > stride) {
		/* write marker to lowest address in top half */
		addrmid = (addrhi + addrlo) >> 1;
		marker = (addrmid ^ 0xabadbabe) & probemask; /* Unique */
		eip197_trc_cache_banksel(priv, addrmid, &actbank);
		writel(marker,
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       (addrmid & 0xffff));

		/* write invalid markers to possible aliases */
		delta = 1 << __fls(addrmid);
		while (delta >= stride) {
			addralias = addrmid - delta;
			eip197_trc_cache_banksel(priv, addralias, &actbank);
			writel(~marker,
			       priv->base + EIP197_CLASSIFICATION_RAMS +
			       (addralias & 0xffff));
			delta >>= 1;
		}

		/* read back marker from top half */
		eip197_trc_cache_banksel(priv, addrmid, &actbank);
		val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
			    (addrmid & 0xffff));

		if ((val & probemask) == marker)
			/* read back correct, continue with top half */
			addrlo = addrmid;
		else
			/* not read back correct, continue with bottom half */
			addrhi = addrmid;
	}
	return addrhi;
}
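/*
 * Illustrative walk-through of the probe, assuming maxbanks = 2 and
 * stride = 32: the search starts with addrlo = 0 and
 * addrhi = 1 << 18 = 256 KiB. The first pass writes the marker at
 * 128 KiB; if only 128 KiB of RAM is fitted, that write wraps around
 * to a lower address, the anti-marker writes then clobber it, the
 * read-back fails and the search continues in the bottom half.
 * After at most log2(256 KiB / 32) = 13 passes, addrhi has converged
 * on the first address that does NOT hold distinct RAM, i.e. the size.
 */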
static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
				   int cs_rc_max, int cs_ht_wc)
{
	int i;
	u32 htable_offset, val, offset;

	/* Clear all records in administration RAM */
	for (i = 0; i < cs_rc_max; i++) {
		offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + 4);
		/* must also initialize the address key due to ECC! */
		writel(0, priv->base + offset + 8);
		writel(0, priv->base + offset + 12);
	}

	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       htable_offset + i * sizeof(u32));
}
static int eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, dsize, asize;
	int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
	int cs_rc_abs_max, cs_ht_sz;
	int maxbanks;

	/* Setup (dummy) virtualization for cache */
	eip197_trc_cache_setupvirt(priv);

	/*
	 * Enable the record cache memory access and
	 * probe the bank select width
	 */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	maxbanks = ((val & EIP197_CS_BANKSEL_MASK) >> EIP197_CS_BANKSEL_OFS) + 1;

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset. Need data memory access here, not admin access.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Probed data RAM size in bytes */
	dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff, 32);

	/*
	 * Now probe the administration RAM size pretty much the same way,
	 * except that only the lower 30 bits are writable and we don't need
	 * bank selects
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	/* admin access now */
	val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK);
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Probed admin RAM size in admin words */
	asize = eip197_trc_cache_probe(priv, 0, 0x3fffffff, 16) >> 4;

	/* Clear any ECC errors detected while probing! */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/* Sanity check probing results */
	if (dsize < EIP197_MIN_DSIZE || asize < EIP197_MIN_ASIZE) {
		dev_err(priv->dev, "Record cache probing failed (%d,%d).",
			dsize, asize);
		return -ENODEV;
	}

	/*
	 * Determine optimal configuration from RAM sizes
	 * Note that we assume that the physical RAM configuration is sane
	 * Therefore, we don't do any parameter error checking here ...
	 */

	/* For now, just use a single record format covering everything */
	cs_trc_rec_wc = EIP197_CS_TRC_REC_WC;
	cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC;

	/*
	 * Step #1: How many records will physically fit?
	 * Hard upper limit is 1023!
	 */
	cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023);
	/* Step #2: Need at least 2 words in the admin RAM per record */
	cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1));
	/* Step #3: Determine log2 of hash table size */
	cs_ht_sz = __fls(asize - cs_rc_max) - 2;
	/* Step #4: determine current size of hash table in dwords */
	cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
	/* Step #5: add back excess words and see if we can fit more records */
	cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));

	/* Clear the cache RAMs */
	eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz);
	writel(val, priv->base + EIP197_TRC_PARAMS);

	dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
		 dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
	return 0;
}
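/*
 * Worked example of the sizing steps above, assuming the probe found
 * dsize = 32768 bytes of data RAM, asize = 2048 admin words, and
 * assuming EIP197_CS_TRC_REC_WC is 64 record words:
 *   #1: (32768 >> 2) / 64 = 128 records fit physically (below 1023 cap)
 *   #2: min(128, 2048 >> 1) = 128 records
 *   #3: cs_ht_sz = __fls(2048 - 128) - 2 = 10 - 2 = 8
 *   #4: cs_ht_wc = 16 << 8 = 4096 hash table dwords
 *   #5: min(128, 2048 - (4096 >> 2)) = 128 records, unchanged
 */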
static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
{
	int pe, i;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Configure the token FIFO's */
		writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));

		/* Clear the ICE scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		/* clear the scratchpad RAM using 32 bit writes only */
		for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
			writel(0, EIP197_PE(priv) +
				  EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2));

		/* Reset the IFPP engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Reset the IPUE engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));

		/* Enable access to all IFPP program memories */
		writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

		/* bypass the OCE, if present */
		if (priv->flags & EIP197_OCE)
			writel(EIP197_DEBUG_OCE_BYPASS, EIP197_PE(priv) +
							EIP197_PE_DEBUG(pe));
	}
}

static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
				 const struct firmware *fw)
{
	const __be32 *data = (const __be32 *)fw->data;
	int i;

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       i * sizeof(__be32));

	/* Exclude final 2 NOPs from size */
	return i - EIP197_FW_TERMINAL_NOPS;
}
/*
 * If FW is actual production firmware, then poll for its initialization
 * to complete and check if it is good for the HW, otherwise just return OK.
 */
static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
{
	int pe, pollcnt;
	u32 base, pollofs;

	if (fpp)
		pollofs = EIP197_FW_FPP_READY;
	else
		pollofs = EIP197_FW_PUE_READY;

	for (pe = 0; pe < priv->config.pes; pe++) {
		base = EIP197_PE_ICE_SCRATCH_RAM(pe);
		pollcnt = EIP197_FW_START_POLLCNT;
		while (pollcnt &&
		       (readl_relaxed(EIP197_PE(priv) + base +
				      pollofs) != 1)) {
			pollcnt--;
		}
		if (!pollcnt) {
			dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
				fpp, pe);
			return false;
		}
	}
	return true;
}

static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
				  int ipuesz, int ifppsz, int minifw)
{
	int pe;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Disable access to all program memory */
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

		/* Start IFPP microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
			      EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Start IPUE microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
			      EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
	}

	/* For miniFW startup, there is no initialization, so always succeed */
	if (minifw)
		return true;

	/* Wait until all the firmwares have properly started up */
	if (!poll_fw_ready(priv, 1))
		return false;
	if (!poll_fw_ready(priv, 0))
		return false;

	return true;
}
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	char fw_path[37], *dir = NULL;
	int i, j, ret = 0, pe;
	int ipuesz, ifppsz, minifw = 0;

	if (priv->version == EIP197D_MRVL)
		dir = "eip197d";
	else if (priv->version == EIP197B_MRVL ||
		 priv->version == EIP197_DEVBRD)
		dir = "eip197b";
	else
		return -ENODEV;

retry_fw:
	for (i = 0; i < FW_NB; i++) {
		snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
		ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
		if (ret) {
			if (minifw || priv->version != EIP197B_MRVL)
				goto release_fw;

			/* Fallback to the old firmware location for the
			 * EIP197b.
			 */
			ret = firmware_request_nowarn(&fw[i], fw_name[i],
						      priv->dev);
			if (ret)
				goto release_fw;
		}
	}

	eip197_init_firmware(priv);

	ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);

	/* Enable access to IPUE program memories */
	for (pe = 0; pe < priv->config.pes; pe++)
		writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);

	if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
		dev_dbg(priv->dev, "Firmware loaded successfully\n");
		return 0;
	}

	ret = -ENODEV;

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	if (!minifw) {
		/* Retry with minifw path */
		dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
		dir = "eip197_minifw";
		minifw = 1;
		goto retry_fw;
	}

	dev_dbg(priv->dev, "Firmware load failed.\n");

	return ret;
}
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 cd_size_rnd, val;
	int i, cd_fetch_cnt;

	cd_size_rnd = (priv->config.cd_size +
		       (BIT(priv->hwconfig.hwdataw) - 1)) >>
		      priv->hwconfig.hwdataw;
	/* determine number of CD's we can fetch into the CD FIFO as 1 block */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
		cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
		cd_fetch_cnt = min_t(uint, cd_fetch_cnt,
				     (priv->config.pes * EIP197_FETCH_DEPTH));
	} else {
		/* for the EIP97, just fetch all that fits minus 1 */
		cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
				cd_size_rnd) - 1;
	}
	/*
	 * Since we're using command desc's way larger than formally specified,
	 * we need to check whether we can fit even 1 for low-end EIP196's!
	 */
	if (!cd_fetch_cnt) {
		dev_err(priv->dev, "Unable to fit even 1 command desc!\n");
		return -ENODEV;
	}

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP |
		       (priv->config.cd_offset << 14) | priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((cd_fetch_cnt *
			 (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
		       (cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}
static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 rd_size_rnd, val;
	int i, rd_fetch_cnt;

	/* determine number of RD's we can fetch into the FIFO as one block */
	rd_size_rnd = (EIP197_RD64_FETCH_SIZE +
		       (BIT(priv->hwconfig.hwdataw) - 1)) >>
		      priv->hwconfig.hwdataw;
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
		rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd;
		rd_fetch_cnt = min_t(uint, rd_fetch_cnt,
				     (priv->config.pes * EIP197_FETCH_DEPTH));
	} else {
		/* for the EIP97, just fetch all that fits minus 1 */
		rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) /
				rd_size_rnd) - 1;
	}

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((rd_fetch_cnt *
			 (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
		       (rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 val;
	int i, ret, pe, opbuflo, opbufhi;

	dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
		priv->config.pes, priv->config.rings);

	/*
	 * For EIP197's only set maximum number of TX commands to 2^5 = 32
	 * Skip for the EIP97 as it does not have this field.
	 */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
	}

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Processing Engine configuration */
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Data Fetch Engine configuration */

		/* Reset all DFE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		if (priv->flags & EIP197_PE_ARB)
			/* Reset HIA input interface arbiter (if present) */
			writel(EIP197_HIA_RA_PE_CTRL_RESET,
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* DMA transfer size to use */
		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

		/* Leave the DFE threads reset state */
		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(9),
		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(7),
		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

		if (priv->flags & SAFEXCEL_HW_EIP197)
			/* enable HIA input interface arbiter and rings */
			writel(EIP197_HIA_RA_PE_CTRL_EN |
			       GENMASK(priv->config.rings - 1, 0),
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* Data Store Engine configuration */

		/* Reset all DSE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Wait for all DSE threads to complete */
		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
			GENMASK(15, 12)) != GENMASK(15, 12))
			;

		/* DMA transfer size to use */
		if (priv->hwconfig.hwnumpes > 4) {
			opbuflo = 9;
			opbufhi = 10;
		} else {
			opbuflo = 7;
			opbufhi = 8;
		}
		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(opbuflo) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(opbufhi);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
		/* FIXME: instability issues can occur for EIP97 but disabling
		 * it impacts performance.
		 */
		if (priv->flags & SAFEXCEL_HW_EIP197)
			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

		/* Leave the DSE threads reset state */
		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
		       EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

		/* Processing Engine configuration */

		/* Token & context configuration */
		val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
		      EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT |
		      EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT;
		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));

		/* H/W capabilities selection: just enable everything */
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe));
	}

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Enable command descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Enable result descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
	}

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->flags & EIP197_SIMPLE_TRC) {
		writel(EIP197_STRC_CONFIG_INIT |
		       EIP197_STRC_CONFIG_LARGE_REC(EIP197_CS_TRC_REC_WC) |
		       EIP197_STRC_CONFIG_SMALL_REC(EIP197_CS_TRC_REC_WC),
		       priv->base + EIP197_STRC_CONFIG);
		writel(EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE,
		       EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL2(0));
	} else if (priv->flags & SAFEXCEL_HW_EIP197) {
		ret = eip197_trc_cache_init(priv);
		if (ret)
			return ret;
	}

	if (priv->flags & EIP197_ICE) {
		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	return safexcel_hw_setup_cdesc_rings(priv) ?:
	       safexcel_hw_setup_rdesc_rings(priv) ?:
	       0;
}
/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}
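/*
 * The effect of the threshold write above is interrupt coalescing:
 * with, say, 3 requests pending on the ring, the RDR packet threshold
 * is set to 3 and a single interrupt fires once all 3 results are in;
 * with more than EIP197_MAX_BATCH_SZ pending, the threshold is capped
 * so the ring interrupts once per batch rather than once per packet.
 */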
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * process it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, &commands, &results);
		if (ret)
			goto request_failed;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results)
			continue;

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset),
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       void *rdp)
{
	struct safexcel_result_desc *rdesc = rdp;
	struct result_data_desc *result_data = rdp + priv->config.res_offset;

	if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */
		   ((!rdesc->descriptor_overflow) &&
		    (!rdesc->buffer_overflow) &&
		    (!result_data->error_code))))
		return 0;

	if (rdesc->descriptor_overflow)
		dev_err(priv->dev, "Descriptor overflow detected");

	if (rdesc->buffer_overflow)
		dev_err(priv->dev, "Buffer overflow detected");

	if (result_data->error_code & 0x4066) {
		/* Fatal error (bits 1,2,5,6 & 14) */
		dev_err(priv->dev,
			"result descriptor error (%x)",
			result_data->error_code);
		return -EIO;
	} else if (result_data->error_code &
		   (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
		/*
		 * Give priority over authentication fails:
		 * Blocksize, length & overflow errors,
		 * something wrong with the input!
		 */
		return -EINVAL;
	} else if (result_data->error_code & BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}

inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
				 int ring,
				 struct safexcel_result_desc *rdesc,
				 struct crypto_async_request *req)
{
	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

	priv->ring[ring].rdr_req[i] = req;
}

inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
	int i = safexcel_ring_first_rdr_index(priv, ring);

	return priv->ring[ring].rdr_req[i];
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	struct safexcel_token *dmmy;
	int ret = 0;

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma,
				   &dmmy);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, async);

	return ret;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	return ret;
}
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct crypto_async_request *req;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		req = safexcel_rdr_req_get(priv, ring);

		ctx = crypto_tfm_ctx(req->tfm);
		ndesc = ctx->handle_result(priv, ring, req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			dev_err(priv->dev, "failed to handle result (%d)\n",
				ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			req->complete(req, ret);
			local_bh_enable();
		}

		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i)
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       (tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);

	/* If the number of requests overflowed the counter, try to process
	 * more requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].lock);
}
static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};

static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.\n");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}
static int safexcel_request_ring_irq(void *pdev, int irqid,
				     int is_pci_dev,
				     int ring_id,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq, cpu;
	struct device *dev;

	if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
		struct pci_dev *pci_pdev = pdev;

		dev = &pci_pdev->dev;
		irq = pci_irq_vector(pci_pdev, irqid);
		if (irq < 0) {
			dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
				irqid, irq);
			return irq;
		}
	} else if (IS_ENABLED(CONFIG_OF)) {
		struct platform_device *plf_pdev = pdev;
		char irq_name[6] = {0}; /* "ringX\0" */

		snprintf(irq_name, 6, "ring%d", irqid);
		dev = &plf_pdev->dev;
		irq = platform_get_irq_byname(plf_pdev, irq_name);
		if (irq < 0)
			return irq;
	} else {
		return -ENXIO;
	}

	ret = devm_request_threaded_irq(dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(dev), ring_irq_priv);
	if (ret) {
		dev_err(dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	/* Set affinity */
	cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
	irq_set_affinity_hint(irq, get_cpu_mask(cpu));

	return irq;
}
static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_des,
	&safexcel_alg_cbc_des,
	&safexcel_alg_ecb_des3_ede,
	&safexcel_alg_cbc_des3_ede,
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_cfb_aes,
	&safexcel_alg_ofb_aes,
	&safexcel_alg_ctr_aes,
	&safexcel_alg_md5,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_sha384,
	&safexcel_alg_sha512,
	&safexcel_alg_hmac_md5,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_hmac_sha384,
	&safexcel_alg_hmac_sha512,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
	&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha1_ctr_aes,
	&safexcel_alg_authenc_hmac_sha224_ctr_aes,
	&safexcel_alg_authenc_hmac_sha256_ctr_aes,
	&safexcel_alg_authenc_hmac_sha384_ctr_aes,
	&safexcel_alg_authenc_hmac_sha512_ctr_aes,
	&safexcel_alg_xts_aes,
	&safexcel_alg_gcm,
	&safexcel_alg_ccm,
	&safexcel_alg_crc32,
	&safexcel_alg_cbcmac,
	&safexcel_alg_xcbcmac,
	&safexcel_alg_cmac,
	&safexcel_alg_chacha20,
	&safexcel_alg_chachapoly,
	&safexcel_alg_chachapoly_esp,
	&safexcel_alg_sm3,
	&safexcel_alg_hmac_sm3,
	&safexcel_alg_ecb_sm4,
	&safexcel_alg_cbc_sm4,
	&safexcel_alg_ofb_sm4,
	&safexcel_alg_cfb_sm4,
	&safexcel_alg_ctr_sm4,
	&safexcel_alg_authenc_hmac_sha1_cbc_sm4,
	&safexcel_alg_authenc_hmac_sm3_cbc_sm4,
	&safexcel_alg_authenc_hmac_sha1_ctr_sm4,
	&safexcel_alg_authenc_hmac_sm3_ctr_sm4,
	&safexcel_alg_sha3_224,
	&safexcel_alg_sha3_256,
	&safexcel_alg_sha3_384,
	&safexcel_alg_sha3_512,
	&safexcel_alg_hmac_sha3_224,
	&safexcel_alg_hmac_sha3_256,
	&safexcel_alg_hmac_sha3_384,
	&safexcel_alg_hmac_sha3_512,
	&safexcel_alg_authenc_hmac_sha1_cbc_des,
	&safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha256_cbc_des,
	&safexcel_alg_authenc_hmac_sha224_cbc_des,
	&safexcel_alg_authenc_hmac_sha512_cbc_des,
	&safexcel_alg_authenc_hmac_sha384_cbc_des,
	&safexcel_alg_rfc4106_gcm,
	&safexcel_alg_rfc4543_gcm,
	&safexcel_alg_rfc4309_ccm,
};
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[i]->algo_mask)
			/* No, so don't register this ciphersuite */
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[j]->algo_mask)
			/* No, so don't unregister this ciphersuite */
			continue;

		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[i]->algo_mask)
			/* No, so don't unregister this ciphersuite */
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 mask = BIT(priv->hwconfig.hwdataw) - 1;

	priv->config.pes = priv->hwconfig.hwnumpes;
	priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings);
	/* Cannot currently support more rings than we have ring AICs! */
	priv->config.rings = min_t(u32, priv->config.rings,
				   priv->hwconfig.hwnumraic);

	priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
	priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask;

	/* res token is behind the descr, but ofs must be rounded to buswdth */
	priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
	/* now the size of the descr is this 1st part plus the result struct */
	priv->config.rd_size = priv->config.res_offset +
			       EIP197_RD64_RESULT_SIZE;
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;

	/* convert dwords to bytes */
	priv->config.cd_offset *= sizeof(u32);
	priv->config.cdsh_offset *= sizeof(u32);
	priv->config.rd_offset *= sizeof(u32);
	priv->config.res_offset *= sizeof(u32);
}
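/*
 * Worked example of the rounding above, with an assumed bus data width
 * of hwdataw = 4 (i.e. 16 dwords per bus word, mask = 0xf): a cd_size
 * of 10 dwords rounds cd_offset up to (10 + 15) & ~15 = 16 dwords,
 * which the final dwords-to-bytes conversion turns into 64 bytes, so
 * each command descriptor starts on a full bus-word boundary.
 */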
static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		offsets->hia_aic = EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe = EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse = EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
		offsets->pe = EIP197_PE_BASE;
		offsets->global = EIP197_GLOBAL_BASE;
	} else {
		offsets->hia_aic = EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe = EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse = EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
		offsets->pe = EIP97_PE_BASE;
		offsets->global = EIP97_GLOBAL_BASE;
	}
}
  1160. /*
  1161. * Generic part of probe routine, shared by platform and PCI driver
  1162. *
  1163. * Assumes IO resources have been mapped, private data mem has been allocated,
  1164. * clocks have been enabled, device pointer has been assigned etc.
  1165. *
  1166. */
  1167. static int safexcel_probe_generic(void *pdev,
  1168. struct safexcel_crypto_priv *priv,
  1169. int is_pci_dev)
  1170. {
  1171. struct device *dev = priv->dev;
  1172. u32 peid, version, mask, val, hiaopt, hwopt, peopt;
  1173. int i, ret, hwctg;
  1174. priv->context_pool = dmam_pool_create("safexcel-context", dev,
  1175. sizeof(struct safexcel_context_record),
  1176. 1, 0);
  1177. if (!priv->context_pool)
  1178. return -ENOMEM;
  1179. /*
  1180. * First try the EIP97 HIA version regs
  1181. * For the EIP197, this is guaranteed to NOT return any of the test
  1182. * values
  1183. */
  1184. version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION);
  1185. mask = 0; /* do not swap */
  1186. if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
  1187. priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
  1188. } else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) {
  1189. /* read back byte-swapped, so complement byte swap bits */
  1190. mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
  1191. priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
  1192. } else {
  1193. /* So it wasn't an EIP97 ... maybe it's an EIP197? */
  1194. version = readl(priv->base + EIP197_HIA_AIC_BASE +
  1195. EIP197_HIA_VERSION);
  1196. if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
  1197. priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
  1198. priv->flags |= SAFEXCEL_HW_EIP197;
  1199. } else if (EIP197_REG_HI16(version) ==
  1200. EIP197_HIA_VERSION_BE) {
  1201. /* read back byte-swapped, so complement swap bits */
  1202. mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
  1203. priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
  1204. priv->flags |= SAFEXCEL_HW_EIP197;
  1205. } else {
  1206. return -ENODEV;
  1207. }
  1208. }
  1209. /* Now initialize the reg offsets based on the probing info so far */
  1210. safexcel_init_register_offsets(priv);
  1211. /*
  1212. * If the version was read byte-swapped, we need to flip the device
  1213. * swapping Keep in mind here, though, that what we write will also be
  1214. * byte-swapped ...
  1215. */
  1216. if (mask) {
		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
		val = val ^ (mask >> 24); /* toggle byte swap bits */
		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
	}

	/*
	 * We're not done probing yet! We may fall through to here if no HIA
	 * was found at all. So, with the endianness presumably correct now and
	 * the offsets setup, *really* probe for the EIP97/EIP197.
	 */
	version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
	if (((priv->flags & SAFEXCEL_HW_EIP197) &&
	     (EIP197_REG_LO16(version) != EIP197_VERSION_LE) &&
	     (EIP197_REG_LO16(version) != EIP196_VERSION_LE)) ||
	    ((!(priv->flags & SAFEXCEL_HW_EIP197) &&
	     (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
		/*
		 * We did not find the device that matched our initial probing
		 * (or our initial probing failed). Report appropriate error.
		 */
		dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n",
			version);
		return -ENODEV;
	}
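
	/*
	 * Decode the global version word: the low byte holds the EIP engine
	 * number and the top nibble the HW configuration field.
	 */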
	priv->hwconfig.hwver = EIP197_VERSION_MASK(version);
	hwctg = version >> 28;
	peid = version & 255;

	/* Detect EIP206 processing pipe */
	version = readl(EIP197_PE(priv) + EIP197_PE_VERSION(0));
	if (EIP197_REG_LO16(version) != EIP206_VERSION_LE) {
		dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid);
		return -ENODEV;
	}
	priv->hwconfig.ppver = EIP197_VERSION_MASK(version);

	/* Detect EIP96 packet engine and version */
	version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
	if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
		dev_err(dev, "EIP%d: EIP96 not detected.\n", peid);
		return -ENODEV;
	}
	priv->hwconfig.pever = EIP197_VERSION_MASK(version);

	hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS);
	hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);

	priv->hwconfig.icever = 0;
	priv->hwconfig.ocever = 0;
	priv->hwconfig.psever = 0;
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197 */
		peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0));

		priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					 EIP197_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
					   EIP197_CFSIZE_MASK) +
					  EIP197_CFSIZE_ADJUST;
		priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
					   EIP197_RFSIZE_MASK) +
					  EIP197_RFSIZE_ADJUST;
		priv->hwconfig.hwnumpes = (hiaopt >> EIP197_N_PES_OFFSET) &
					  EIP197_N_PES_MASK;
		priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
					    EIP197_N_RINGS_MASK;
		if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB)
			priv->flags |= EIP197_PE_ARB;
		if (EIP206_OPT_ICE_TYPE(peopt) == 1) {
			priv->flags |= EIP197_ICE;
			/* Detect ICE EIP207 class. engine and version */
			version = readl(EIP197_PE(priv) +
					EIP197_PE_ICE_VERSION(0));
			if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
				dev_err(dev, "EIP%d: ICE EIP207 not detected.\n",
					peid);
				return -ENODEV;
			}
			priv->hwconfig.icever = EIP197_VERSION_MASK(version);
		}
		if (EIP206_OPT_OCE_TYPE(peopt) == 1) {
			priv->flags |= EIP197_OCE;
			/* Detect EIP96PP packet stream editor and version */
			version = readl(EIP197_PE(priv) +
					EIP197_PE_PSE_VERSION(0));
			if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
				dev_err(dev, "EIP%d: EIP96PP not detected.\n",
					peid);
				return -ENODEV;
			}
			priv->hwconfig.psever = EIP197_VERSION_MASK(version);
			/* Detect OCE EIP207 class. engine and version */
			version = readl(EIP197_PE(priv) +
					EIP197_PE_ICE_VERSION(0));
			if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
				dev_err(dev, "EIP%d: OCE EIP207 not detected.\n",
					peid);
				return -ENODEV;
			}
			priv->hwconfig.ocever = EIP197_VERSION_MASK(version);
		}
		/* If not a full TRC, then assume simple TRC */
		if (!(hwopt & EIP197_OPT_HAS_TRC))
			priv->flags |= EIP197_SIMPLE_TRC;
		/* EIP197 always has SOME form of TRC */
		priv->flags |= EIP197_TRC_CACHE;
	} else {
		/* EIP97 */
		priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					 EIP97_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
					  EIP97_CFSIZE_MASK;
		priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
					  EIP97_RFSIZE_MASK;
		priv->hwconfig.hwnumpes = 1; /* by definition */
		priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
					    EIP197_N_RINGS_MASK;
	}
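
	/*
	 * Note: the EIP97 path above decodes the same HIA_OPTIONS register
	 * with its own field masks and offsets, and has no per-PE options
	 * register to probe.
	 */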

	/* Scan for ring AIC's */
	for (i = 0; i < EIP197_MAX_RING_AIC; i++) {
		version = readl(EIP197_HIA_AIC_R(priv) +
				EIP197_HIA_AIC_R_VERSION(i));
		if (EIP197_REG_LO16(version) != EIP201_VERSION_LE)
			break;
	}
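	/* The scan stops at the first miss, so i is the number of ring AICs */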
	priv->hwconfig.hwnumraic = i;
	/* Low-end EIP196 may not have any ring AIC's ... */
	if (!priv->hwconfig.hwnumraic) {
		dev_err(priv->dev, "No ring interrupt controller present!\n");
		return -ENODEV;
	}

	/* Get supported algorithms from EIP96 transform engine */
	priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
					  EIP197_PE_EIP96_OPTIONS(0));

	/* Print single info line describing what we just detected */
	dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x(alg:%08x)/%x/%x/%x\n",
		 peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
		 priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
		 priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
		 priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
		 priv->hwconfig.ppver, priv->hwconfig.pever,
		 priv->hwconfig.algo_flags, priv->hwconfig.icever,
		 priv->hwconfig.ocever, priv->hwconfig.psever);

	safexcel_configure(priv);

	if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) {
		/*
		 * Request MSI vectors for global + 1 per ring -
		 * or just 1 for older dev images
		 */
		struct pci_dev *pci_pdev = pdev;
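
		/* min_vecs == max_vecs here: the allocation is all-or-nothing */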
		ret = pci_alloc_irq_vectors(pci_pdev,
					    priv->config.rings + 1,
					    priv->config.rings + 1,
					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
		if (ret < 0) {
			dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
			return ret;
		}
	}

	/* Register the ring IRQ handlers and configure the rings */
	priv->ring = devm_kcalloc(dev, priv->config.rings,
				  sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;
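
	/*
	 * Per ring: set up the descriptor rings and request backlog, hook up
	 * the (threaded) ring IRQ and create a dedicated single-threaded
	 * dequeue workqueue.
	 */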
	for (i = 0; i < priv->config.rings; i++) {
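		/* wq_name fits "wq_ring" plus a single-digit index and NUL */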
		char wq_name[9] = {0};
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret) {
			dev_err(dev, "Failed to initialize rings\n");
			goto err_cleanup_rings;
		}

		priv->ring[i].rdr_req = devm_kcalloc(dev,
						     EIP197_DEFAULT_RING_SIZE,
						     sizeof(*priv->ring[i].rdr_req),
						     GFP_KERNEL);
		if (!priv->ring[i].rdr_req) {
			ret = -ENOMEM;
			goto err_cleanup_rings;
		}

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq) {
			ret = -ENOMEM;
			goto err_cleanup_rings;
		}

		ring_irq->priv = priv;
		ring_irq->ring = i;

		irq = safexcel_request_ring_irq(pdev,
						EIP197_IRQ_NUMBER(i, is_pci_dev),
						is_pci_dev,
						i,
						safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
			ret = irq;
			goto err_cleanup_rings;
		}

		priv->ring[i].irq = irq;
		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work,
			  safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue =
			create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue) {
			ret = -ENOMEM;
			goto err_cleanup_rings;
		}

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "HW init failed (%d)\n", ret);
		goto err_cleanup_rings;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		goto err_cleanup_rings;
	}

	return 0;

err_cleanup_rings:
	for (i = 0; i < priv->config.rings; i++) {
		if (priv->ring[i].irq)
			irq_set_affinity_hint(priv->ring[i].irq, NULL);
		if (priv->ring[i].workqueue)
			destroy_workqueue(priv->ring[i].workqueue);
	}

	return ret;
}
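
/*
 * Quiesce the rings: ack any pending xDR interrupts and clear the ring
 * base addresses so the engine no longer references descriptor memory
 * that is about to be freed.
 */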
static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}

/* for Device Tree platform driver */
static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	platform_set_drvdata(pdev, priv);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	/* Generic EIP97/EIP197 device probing */
	ret = safexcel_probe_generic(pdev, priv, 0);
	if (ret)
		goto err_reg_clk;

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	clk_disable_unprepare(priv->reg_clk);
	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++) {
		irq_set_affinity_hint(priv->ring[i].irq, NULL);
		destroy_workqueue(priv->ring[i].workqueue);
	}

	return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = (void *)EIP197B_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = (void *)EIP197D_MRVL,
	},
	/* For backward compatibility and intended for generic use */
	{
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197B_MRVL,
	},
	{},
};

MODULE_DEVICE_TABLE(of, safexcel_of_match_table);

static struct platform_driver crypto_safexcel = {
	.probe = safexcel_probe,
	.remove = safexcel_remove,
	.driver = {
		.name = "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};

/* PCIE devices - i.e. Inside Secure development boards */
static int safexcel_pci_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	void __iomem *pciebase;
	int rc;
	u32 val;

	dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
		ent->vendor, ent->device, ent->subvendor,
		ent->subdevice, ent->driver_data);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)ent->driver_data;

	pci_set_drvdata(pdev, priv);

	/* enable the device */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(dev, "Failed to enable PCI device\n");
		return rc;
	}

	/* take ownership of PCI BAR0 */
	rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
	if (rc) {
		dev_err(dev, "Failed to map IO region for BAR0\n");
		return rc;
	}
	priv->base = pcim_iomap_table(pdev)[0];

	if (priv->version == EIP197_DEVBRD) {
		dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");

		rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
		if (rc) {
			dev_err(dev, "Failed to map IO region for BAR4\n");
			return rc;
		}
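
		/*
		 * pcim_iomap_regions() takes a bit mask of BARs; mask 4 above
		 * is bit 2, matching the iomap table index used here.
		 */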
		pciebase = pcim_iomap_table(pdev)[2];
		val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
		if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
			dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
				(val & 0xff));

			/* Setup MSI identity map mapping */
			writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);

			/* Enable all device interrupts */
			writel(GENMASK(31, 0),
			       pciebase + EIP197_XLX_USER_INT_ENB_MSK);
		} else {
			dev_err(dev, "Unrecognised IRQ block identifier %x\n",
				val);
			return -ENODEV;
		}

		/* HW reset FPGA dev board */
		/* assert reset */
		writel(1, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
		/* deassert reset */
		writel(0, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
	}

	/* enable bus mastering */
	pci_set_master(pdev);

	/* Generic EIP97/EIP197 device probing */
	rc = safexcel_probe_generic(pdev, priv, 1);
	return rc;
}

static void safexcel_pci_remove(struct pci_dev *pdev)
{
	struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	safexcel_hw_reset_rings(priv);
}

static const struct pci_device_id safexcel_pci_ids[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
			       0x16ae, 0xc522),
		.driver_data = EIP197_DEVBRD,
	},
	{},
};

MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);

static struct pci_driver safexcel_pci_driver = {
	.name = "crypto-safexcel",
	.id_table = safexcel_pci_ids,
	.probe = safexcel_pci_probe,
	.remove = safexcel_pci_remove,
};

static int __init safexcel_init(void)
{
	int ret;

	/* Register PCI driver */
	ret = pci_register_driver(&safexcel_pci_driver);

	/* Register platform driver */
	if (IS_ENABLED(CONFIG_OF) && !ret) {
		ret = platform_driver_register(&crypto_safexcel);
		if (ret)
			pci_unregister_driver(&safexcel_pci_driver);
	}

	return ret;
}

static void __exit safexcel_exit(void)
{
	/* Unregister platform driver */
	if (IS_ENABLED(CONFIG_OF))
		platform_driver_unregister(&crypto_safexcel);

	/* Unregister PCI driver if successfully registered before */
	pci_unregister_driver(&safexcel_pci_driver);
}

module_init(safexcel_init);
module_exit(safexcel_exit);

MODULE_AUTHOR("Antoine Tenart <[email protected]>");
MODULE_AUTHOR("Ofer Heifetz <[email protected]>");
MODULE_AUTHOR("Igal Liberman <[email protected]>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);

MODULE_FIRMWARE("ifpp.bin");
MODULE_FIRMWARE("ipue.bin");
MODULE_FIRMWARE("inside-secure/eip197b/ifpp.bin");
MODULE_FIRMWARE("inside-secure/eip197b/ipue.bin");
MODULE_FIRMWARE("inside-secure/eip197d/ifpp.bin");
MODULE_FIRMWARE("inside-secure/eip197d/ipue.bin");
MODULE_FIRMWARE("inside-secure/eip197_minifw/ifpp.bin");
MODULE_FIRMWARE("inside-secure/eip197_minifw/ipue.bin");