artpec6_crypto.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
 *
 * Copyright (C) 2014-2017 Axis Communications AB
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>

/* Max length of a line in all cache levels for Artpec SoCs. */
#define ARTPEC_CACHE_LINE_MAX	32

#define PDMA_OUT_CFG		0x0000
#define PDMA_OUT_BUF_CFG	0x0004
#define PDMA_OUT_CMD		0x0008
#define PDMA_OUT_DESCRQ_PUSH	0x0010
#define PDMA_OUT_DESCRQ_STAT	0x0014

#define A6_PDMA_IN_CFG		0x0028
#define A6_PDMA_IN_BUF_CFG	0x002c
#define A6_PDMA_IN_CMD		0x0030
#define A6_PDMA_IN_STATQ_PUSH	0x0038
#define A6_PDMA_IN_DESCRQ_PUSH	0x0044
#define A6_PDMA_IN_DESCRQ_STAT	0x0048
#define A6_PDMA_INTR_MASK	0x0068
#define A6_PDMA_ACK_INTR	0x006c
#define A6_PDMA_MASKED_INTR	0x0074

#define A7_PDMA_IN_CFG		0x002c
#define A7_PDMA_IN_BUF_CFG	0x0030
#define A7_PDMA_IN_CMD		0x0034
#define A7_PDMA_IN_STATQ_PUSH	0x003c
#define A7_PDMA_IN_DESCRQ_PUSH	0x0048
#define A7_PDMA_IN_DESCRQ_STAT	0x004C
#define A7_PDMA_INTR_MASK	0x006c
#define A7_PDMA_ACK_INTR	0x0070
#define A7_PDMA_MASKED_INTR	0x0078

#define PDMA_OUT_CFG_EN				BIT(0)

#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)

#define PDMA_OUT_CMD_START			BIT(0)
#define A6_PDMA_OUT_CMD_STOP			BIT(3)
#define A7_PDMA_OUT_CMD_STOP			BIT(2)

#define PDMA_OUT_DESCRQ_PUSH_LEN		GENMASK(5, 0)
#define PDMA_OUT_DESCRQ_PUSH_ADDR		GENMASK(31, 6)

#define PDMA_OUT_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
#define PDMA_OUT_DESCRQ_STAT_SIZE		GENMASK(7, 4)

#define PDMA_IN_CFG_EN				BIT(0)

#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)
#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE		GENMASK(14, 10)

#define PDMA_IN_CMD_START			BIT(0)
#define A6_PDMA_IN_CMD_FLUSH_STAT		BIT(2)
#define A6_PDMA_IN_CMD_STOP			BIT(3)
#define A7_PDMA_IN_CMD_FLUSH_STAT		BIT(1)
#define A7_PDMA_IN_CMD_STOP			BIT(2)

#define PDMA_IN_STATQ_PUSH_LEN			GENMASK(5, 0)
#define PDMA_IN_STATQ_PUSH_ADDR			GENMASK(31, 6)

#define PDMA_IN_DESCRQ_PUSH_LEN			GENMASK(5, 0)
#define PDMA_IN_DESCRQ_PUSH_ADDR		GENMASK(31, 6)

#define PDMA_IN_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
#define PDMA_IN_DESCRQ_STAT_SIZE		GENMASK(7, 4)

#define A6_PDMA_INTR_MASK_IN_DATA		BIT(2)
#define A6_PDMA_INTR_MASK_IN_EOP		BIT(3)
#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(4)

#define A7_PDMA_INTR_MASK_IN_DATA		BIT(3)
#define A7_PDMA_INTR_MASK_IN_EOP		BIT(4)
#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(5)

#define A6_CRY_MD_OPER		GENMASK(19, 16)
#define A6_CRY_MD_HASH_SEL_CTX	GENMASK(21, 20)
#define A6_CRY_MD_HASH_HMAC_FIN	BIT(23)
#define A6_CRY_MD_CIPHER_LEN	GENMASK(21, 20)
#define A6_CRY_MD_CIPHER_DECR	BIT(22)
#define A6_CRY_MD_CIPHER_TWEAK	BIT(23)
#define A6_CRY_MD_CIPHER_DSEQ	BIT(24)

#define A7_CRY_MD_OPER		GENMASK(11, 8)
#define A7_CRY_MD_HASH_SEL_CTX	GENMASK(13, 12)
#define A7_CRY_MD_HASH_HMAC_FIN	BIT(15)
#define A7_CRY_MD_CIPHER_LEN	GENMASK(13, 12)
#define A7_CRY_MD_CIPHER_DECR	BIT(14)
#define A7_CRY_MD_CIPHER_TWEAK	BIT(15)
#define A7_CRY_MD_CIPHER_DSEQ	BIT(16)

/* DMA metadata constants */
#define regk_crypto_aes_cbc     0x00000002
#define regk_crypto_aes_ctr     0x00000003
#define regk_crypto_aes_ecb     0x00000001
#define regk_crypto_aes_gcm     0x00000004
#define regk_crypto_aes_xts     0x00000005
#define regk_crypto_cache       0x00000002
#define a6_regk_crypto_dlkey    0x0000000a
#define a7_regk_crypto_dlkey    0x0000000e
#define regk_crypto_ext         0x00000001
#define regk_crypto_hmac_sha1   0x00000007
#define regk_crypto_hmac_sha256 0x00000009
#define regk_crypto_init        0x00000000
#define regk_crypto_key_128     0x00000000
#define regk_crypto_key_192     0x00000001
#define regk_crypto_key_256     0x00000002
#define regk_crypto_null        0x00000000
#define regk_crypto_sha1        0x00000006
#define regk_crypto_sha256      0x00000008

/* DMA descriptor structures */
struct pdma_descr_ctrl  {
	unsigned char short_descr : 1;
	unsigned char pad1 : 1;
	unsigned char eop : 1;
	unsigned char intr : 1;
	unsigned char short_len : 3;
	unsigned char pad2 : 1;
} __packed;

struct pdma_data_descr {
	unsigned int len : 24;
	unsigned int buf : 32;
} __packed;

struct pdma_short_descr {
	unsigned char data[7];
} __packed;

struct pdma_descr {
	struct pdma_descr_ctrl ctrl;
	union {
		struct pdma_data_descr data;
		struct pdma_short_descr shrt;
	};
};

struct pdma_stat_descr {
	unsigned char pad1 : 1;
	unsigned char pad2 : 1;
	unsigned char eop : 1;
	unsigned char pad3 : 5;
	unsigned int len : 24;
};

/* Each descriptor array can hold max 64 entries */
#define PDMA_DESCR_COUNT	64

#define MODULE_NAME		"Artpec-6 CA"

/* Hash modes (including HMAC variants) */
#define ARTPEC6_CRYPTO_HASH_SHA1	1
#define ARTPEC6_CRYPTO_HASH_SHA256	2

/* Crypto modes */
#define ARTPEC6_CRYPTO_CIPHER_AES_ECB	1
#define ARTPEC6_CRYPTO_CIPHER_AES_CBC	2
#define ARTPEC6_CRYPTO_CIPHER_AES_CTR	3
#define ARTPEC6_CRYPTO_CIPHER_AES_XTS	5

/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
 * It operates on a descriptor array with up to 64 descriptor entries.
 * The arrays must be 64 byte aligned in memory.
 *
 * The ciphering unit has no registers and is completely controlled by
 * a 4-byte metadata that is inserted at the beginning of each dma packet.
 *
 * A dma packet is a sequence of descriptors terminated by setting the .eop
 * field in the final descriptor of the packet.
 *
 * Multiple packets are used for providing context data, key data and
 * the plain/ciphertext.
 *
 *   PDMA Descriptors (Array)
 *  +------+------+------+~~+-------+------+----
 *  |  0   |  1   |  2   |~~| 11 EOP|  12  |  ....
 *  +--+---+--+---+----+-+~~+-------+----+-+----
 *     |      |        |                |
 *     |      |        |                |
 *   __|__  +-------++-------++-------+ +----+
 *  | MD  | |Payload||Payload||Payload| | MD |
 *  +-----+ +-------++-------++-------+ +----+
 */
struct artpec6_crypto_bounce_buffer {
	struct list_head list;
	size_t length;
	struct scatterlist *sg;
	size_t offset;
	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
	 * holds up to ARTPEC_CACHE_LINE_MAX bytes of data.
	 */
	void *buf;
};
struct artpec6_crypto_dma_map {
	dma_addr_t dma_addr;
	size_t size;
	enum dma_data_direction dir;
};

struct artpec6_crypto_dma_descriptors {
	struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
	struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
	u32 stat[PDMA_DESCR_COUNT] __aligned(64);
	struct list_head bounce_buffers;
	/* Enough maps for all out/in buffers, and all three descr. arrays */
	struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
	dma_addr_t out_dma_addr;
	dma_addr_t in_dma_addr;
	dma_addr_t stat_dma_addr;
	size_t out_cnt;
	size_t in_cnt;
	size_t map_count;
};

enum artpec6_crypto_variant {
	ARTPEC6_CRYPTO,
	ARTPEC7_CRYPTO,
};

struct artpec6_crypto {
	void __iomem *base;
	spinlock_t queue_lock;
	struct list_head queue; /* waiting for pdma fifo space */
	struct list_head pending; /* submitted to pdma fifo */
	struct tasklet_struct task;
	struct kmem_cache *dma_cache;
	int pending_count;
	struct timer_list timer;
	enum artpec6_crypto_variant variant;
	void *pad_buffer; /* cache-aligned block padding buffer */
	void *zero_buffer;
};

enum artpec6_crypto_hash_flags {
	HASH_FLAG_INIT_CTX = 2,
	HASH_FLAG_UPDATE = 4,
	HASH_FLAG_FINALIZE = 8,
	HASH_FLAG_HMAC = 16,
	HASH_FLAG_UPDATE_KEY = 32,
};

struct artpec6_crypto_req_common {
	struct list_head list;
	struct list_head complete_in_progress;
	struct artpec6_crypto_dma_descriptors *dma;
	struct crypto_async_request *req;
	void (*complete)(struct crypto_async_request *req);
	gfp_t gfp_flags;
};

struct artpec6_hash_request_context {
	char partial_buffer[SHA256_BLOCK_SIZE];
	char partial_buffer_out[SHA256_BLOCK_SIZE];
	char key_buffer[SHA256_BLOCK_SIZE];
	char pad_buffer[SHA256_BLOCK_SIZE + 32];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	u32 key_md;
	u32 hash_md;
	enum artpec6_crypto_hash_flags hash_flags;
	struct artpec6_crypto_req_common common;
};

struct artpec6_hash_export_state {
	char partial_buffer[SHA256_BLOCK_SIZE];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	int oper;
	unsigned int hash_flags;
};

struct artpec6_hashalg_context {
	char hmac_key[SHA256_BLOCK_SIZE];
	size_t hmac_key_length;
	struct crypto_shash *child_hash;
};

struct artpec6_crypto_request_context {
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
};

struct artpec6_cryptotfm_context {
	unsigned char aes_key[2*AES_MAX_KEY_SIZE];
	size_t key_length;
	u32 key_md;
	int crypto_type;
	struct crypto_sync_skcipher *fallback;
};

struct artpec6_crypto_aead_hw_ctx {
	__be64 aad_length_bits;
	__be64 text_length_bits;
	__u8 J0[AES_BLOCK_SIZE];
};

struct artpec6_crypto_aead_req_ctx {
	struct artpec6_crypto_aead_hw_ctx hw_ctx;
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
	__u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
};

/* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev;

#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
#endif

enum {
	ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
	ARTPEC6_CRYPTO_PREPARE_HASH_START,
};

static int artpec6_crypto_prepare_aead(struct aead_request *areq);
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
static int artpec6_crypto_prepare_hash(struct ahash_request *areq);

static void
artpec6_crypto_complete_crypto(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_aead(struct crypto_async_request *req);
static void
artpec6_crypto_complete_hash(struct crypto_async_request *req);

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);

static void
artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);
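
/* Lightweight cursor over a scatterlist: tracks the current entry and the
 * byte offset within it while DMA descriptors are being built.
 */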
struct artpec6_crypto_walk {
	struct scatterlist *sg;
	size_t offset;
};

static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
				     struct scatterlist *sg)
{
	awalk->sg = sg;
	awalk->offset = 0;
}

static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
					  size_t nbytes)
{
	while (nbytes && awalk->sg) {
		size_t piece;

		WARN_ON(awalk->offset > awalk->sg->length);

		piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
		nbytes -= piece;
		awalk->offset += piece;
		if (awalk->offset == awalk->sg->length) {
			awalk->sg = sg_next(awalk->sg);
			awalk->offset = 0;
		}
	}

	return nbytes;
}

static size_t
artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
{
	WARN_ON(awalk->sg->length == awalk->offset);

	return awalk->sg->length - awalk->offset;
}

static dma_addr_t
artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
{
	return sg_phys(awalk->sg) + awalk->offset;
}
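
/* Copy data that was received into bounce buffers back into the request's
 * scatterlist once the DMA transfer has completed, then free the entries.
 */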
static void
artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
			 b, b->length, b->offset, b->buf);
		sg_pcopy_from_buffer(b->sg,
				     1,
				     b->buf,
				     b->length,
				     b->offset);
		list_del(&b->list);
		kfree(b);
	}
}
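
/* Returns true when enough requests are already in flight (more than six)
 * that new ones should be queued or rejected instead of started immediately.
 */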
static inline bool artpec6_crypto_busy(void)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int fifo_count = ac->pending_count;

	return fifo_count > 6;
}
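
/* Queue a prepared request: start it at once if the PDMA has room, put it on
 * the wait queue if the caller allows backlogging, otherwise free its
 * resources. Returns -EINPROGRESS when started, -EBUSY otherwise.
 */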
static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int ret = -EBUSY;

	spin_lock_bh(&ac->queue_lock);

	if (!artpec6_crypto_busy()) {
		list_add_tail(&req->list, &ac->pending);
		artpec6_crypto_start_dma(req);
		ret = -EINPROGRESS;
	} else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		list_add_tail(&req->list, &ac->queue);
	} else {
		artpec6_crypto_common_destroy(req);
	}

	spin_unlock_bh(&ac->queue_lock);

	return ret;
}

static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	u32 ind, statd, outd;

	/* Make descriptor content visible to the DMA before starting it. */
	wmb();

	ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
	      FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);

	statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
		FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);

	outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
	       FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
	} else {
		writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
	}

	writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
	writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);

	ac->pending_count++;
}

static void
artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;

	dma->out_cnt = 0;
	dma->in_cnt = 0;
	dma->map_count = 0;
	INIT_LIST_HEAD(&dma->bounce_buffers);
}

static bool fault_inject_dma_descr(void)
{
#ifdef CONFIG_FAULT_INJECTION
	return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
#else
	return false;
#endif
}

/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
 *                                        physical address
 *
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @eop:  True if this is the last buffer in the packet
 *
 * @return 0 on success or -ENOSPC if there are no more descriptors available
 */
static int
artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
				    dma_addr_t addr, size_t len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	}

	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 0;
	d->ctrl.eop = eop;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}
/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
 *
 * @dst: The virtual address of the data
 * @len: The length of the data, must be between 1 and 7 bytes
 * @eop: True if this is the last buffer in the packet
 *
 * @return 0 on success
 *	-ENOSPC if no more descriptors are available
 *	-EINVAL if the data length exceeds 7 bytes
 */
static int
artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
				     void *dst, unsigned int len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	} else if (len > 7 || len < 1) {
		return -EINVAL;
	}
	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 1;
	d->ctrl.short_len = len;
	d->ctrl.eop = eop;
	memcpy(d->shrt.data, dst, len);
	return 0;
}

static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
				       struct page *page, size_t offset,
				       size_t size,
				       enum dma_data_direction dir,
				       dma_addr_t *dma_addr_out)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	struct artpec6_crypto_dma_map *map;
	dma_addr_t dma_addr;

	*dma_addr_out = 0;

	if (dma->map_count >= ARRAY_SIZE(dma->maps))
		return -ENOMEM;

	dma_addr = dma_map_page(dev, page, offset, size, dir);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	map = &dma->maps[dma->map_count++];
	map->size = size;
	map->dma_addr = dma_addr;
	map->dir = dir;

	*dma_addr_out = dma_addr;

	return 0;
}

static int
artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
			      void *ptr, size_t size,
			      enum dma_data_direction dir,
			      dma_addr_t *dma_addr_out)
{
	struct page *page = virt_to_page(ptr);
	size_t offset = (uintptr_t)ptr & ~PAGE_MASK;

	return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
					   dma_addr_out);
}
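
/* DMA-map the in/out descriptor arrays and the status array themselves.
 * Only the status word of the last in-descriptor is consumed, so it is
 * cleared before the (bidirectional) mapping is created.
 */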
static int
artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, dma->in,
				sizeof(dma->in[0]) * dma->in_cnt,
				DMA_TO_DEVICE, &dma->in_dma_addr);
	if (ret)
		return ret;

	ret = artpec6_crypto_dma_map_single(common, dma->out,
				sizeof(dma->out[0]) * dma->out_cnt,
				DMA_TO_DEVICE, &dma->out_dma_addr);
	if (ret)
		return ret;

	/* We only read one stat descriptor */
	dma->stat[dma->in_cnt - 1] = 0;

	/*
	 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
	 * to be written.
	 */
	return artpec6_crypto_dma_map_single(common,
				dma->stat,
				sizeof(dma->stat[0]) * dma->in_cnt,
				DMA_BIDIRECTIONAL,
				&dma->stat_dma_addr);
}

static void
artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	int i;

	for (i = 0; i < dma->map_count; i++) {
		struct artpec6_crypto_dma_map *map = &dma->maps[i];

		dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
	}

	dma->map_count = 0;
}
/** artpec6_crypto_setup_out_descr - Setup an out descriptor
 *
 * @dst: The virtual address of the data
 * @len: The length of the data
 * @eop: True if this is the last buffer in the packet
 * @use_short: If this is true and the data length is less than 7 bytes then
 *	a short descriptor will be used
 *
 * @return 0 on success
 *	Any errors from artpec6_crypto_setup_out_descr_short() or
 *	artpec6_crypto_setup_out_descr_phys()
 */
static int
artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
			       void *dst, unsigned int len, bool eop,
			       bool use_short)
{
	if (use_short && len < 7) {
		return artpec6_crypto_setup_out_descr_short(common, dst, len,
							    eop);
	} else {
		int ret;
		dma_addr_t dma_addr;

		ret = artpec6_crypto_dma_map_single(common, dst, len,
						    DMA_TO_DEVICE,
						    &dma_addr);
		if (ret)
			return ret;

		return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
							   len, eop);
	}
}

/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
 *                                       physical address
 *
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @intr: True if an interrupt should be fired after HW processing of this
 *	  descriptor
 *
 */
static int
artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
				   dma_addr_t addr, unsigned int len, bool intr)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->in_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free IN DMA descriptors available!\n");
		return -ENOSPC;
	}
	d = &dma->in[dma->in_cnt++];
	memset(d, 0, sizeof(*d));
	d->ctrl.intr = intr;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}
/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
 *
 * @buffer: The virtual address of the data buffer
 * @len:    The length of the data buffer
 * @last:   If this is the last data buffer in the request (i.e. an interrupt
 *	    is needed)
 *
 * Short descriptors are not used for the in channel
 */
static int
artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
			      void *buffer, unsigned int len, bool last)
{
	dma_addr_t dma_addr;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, buffer, len,
					    DMA_FROM_DEVICE, &dma_addr);
	if (ret)
		return ret;

	return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
}
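
/* Allocate a bounce buffer entry whose payload area is cache-line aligned
 * and holds up to one cache line of data.
 */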
static struct artpec6_crypto_bounce_buffer *
artpec6_crypto_alloc_bounce(gfp_t flags)
{
	void *base;
	size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
			    2 * ARTPEC_CACHE_LINE_MAX;
	struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);

	if (!bbuf)
		return NULL;

	base = bbuf + 1;
	bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
	return bbuf;
}

static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk, size_t size)
{
	struct artpec6_crypto_bounce_buffer *bbuf;
	int ret;

	bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
	if (!bbuf)
		return -ENOMEM;

	bbuf->length = size;
	bbuf->sg = walk->sg;
	bbuf->offset = walk->offset;

	ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
	if (ret) {
		kfree(bbuf);
		return ret;
	}

	pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
	list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
	return 0;
}
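
/* Build in-channel descriptors for up to @count bytes of the walk's
 * scatterlist. Destination chunks that are not cache-line aligned, or are
 * shorter than a cache line, are routed through bounce buffers.
 */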
static int
artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk,
				  size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		/* When destination buffers are not aligned to the cache line
		 * size we need bounce buffers. The DMA-API requires that the
		 * entire line is owned by the DMA buffer and this holds also
		 * for the case when coherent DMA is used.
		 */
		if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
			chunk = min_t(dma_addr_t, chunk,
				      ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
				      addr);

			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else if (chunk < ARTPEC_CACHE_LINE_MAX) {
			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else {
			dma_addr_t dma_addr;

			chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);

			pr_debug("CHUNK %pad:%zu\n", &addr, chunk);

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->sg->offset +
							  walk->offset,
							  chunk,
							  DMA_FROM_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr_phys(common,
								 dma_addr,
								 chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}
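
/* Build out-channel descriptors for up to @count bytes of the walk's
 * scatterlist. Source data that is not 32-bit aligned is copied into short
 * descriptors instead of being DMA-mapped directly.
 */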
static int
artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
				   struct artpec6_crypto_walk *walk,
				   size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);

		if (addr & 3) {
			char buf[3];

			chunk = min_t(size_t, chunk, (4-(addr&3)));

			sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
					   walk->offset);

			ret = artpec6_crypto_setup_out_descr_short(common, buf,
								   chunk,
								   false);
		} else {
			dma_addr_t dma_addr;

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->sg->offset +
							  walk->offset,
							  chunk,
							  DMA_TO_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_out_descr_phys(common,
								  dma_addr,
								  chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}

/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
 *
 * If the out descriptor list is non-empty, then the eop flag on the
 * last used out descriptor will be set.
 *
 * @return 0 on success
 *	-EINVAL if the out descriptor is empty or has overflown
 */
static int
artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: OUT descriptor list is %s\n",
		       MODULE_NAME, dma->out_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->out[dma->out_cnt-1];
	d->ctrl.eop = 1;

	return 0;
}

/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
 *                                       in descriptor
 *
 * See artpec6_crypto_terminate_out_descrs() for return values
 */
static int
artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: IN descriptor list is %s\n",
		       MODULE_NAME, dma->in_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->in[dma->in_cnt-1];
	d->ctrl.intr = 1;
	return 0;
}
/** create_hash_pad - Create a Secure Hash conformant pad
 *
 * @dst:      The destination buffer to write the pad. Must be at least 64 bytes
 * @dgstlen:  The total length of the hash digest in bytes
 * @bitcount: The total length of the digest in bits
 *
 * @return The total number of padding bytes written to @dst
 */
static size_t
create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
{
	unsigned int mod, target, diff, pad_bytes, size_bytes;
	__be64 bits = __cpu_to_be64(bitcount);

	switch (oper) {
	case regk_crypto_sha1:
	case regk_crypto_sha256:
	case regk_crypto_hmac_sha1:
	case regk_crypto_hmac_sha256:
		target = 448 / 8;
		mod = 512 / 8;
		size_bytes = 8;
		break;
	default:
		target = 896 / 8;
		mod = 1024 / 8;
		size_bytes = 16;
		break;
	}

	target -= 1;
	diff = dgstlen & (mod - 1);
	pad_bytes = diff > target ? target + mod - diff : target - diff;

	memset(dst + 1, 0, pad_bytes);
	dst[0] = 0x80;

	if (size_bytes == 16) {
		memset(dst + 1 + pad_bytes, 0, 8);
		memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
	} else {
		memcpy(dst + 1 + pad_bytes, &bits, 8);
	}

	return pad_bytes + size_bytes + 1;
}
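
/* Allocate the per-request DMA descriptor state from the driver's slab cache
 * and record the completion callback. GFP flags follow the request's
 * CRYPTO_TFM_REQ_MAY_SLEEP flag.
 */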
static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
				      struct crypto_async_request *parent,
				      void (*complete)(struct crypto_async_request *req),
				      struct scatterlist *dstsg, unsigned int nbytes)
{
	gfp_t flags;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		 GFP_KERNEL : GFP_ATOMIC;

	common->gfp_flags = flags;
	common->dma = kmem_cache_alloc(ac->dma_cache, flags);
	if (!common->dma)
		return -ENOMEM;

	common->req = parent;
	common->complete = complete;
	return 0;
}

static void
artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
{
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		kfree(b);
	}
}

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	artpec6_crypto_dma_unmap_all(common);
	artpec6_crypto_bounce_destroy(common->dma);
	kmem_cache_free(ac->dma_cache, common->dma);
	common->dma = NULL;
	return 0;
}

/*
 * Ciphering functions.
 */
static int artpec6_crypto_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);
	int ret;

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 0;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_encrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_decrypt(struct skcipher_request *req)
{
	int ret;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 1;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_decrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int
artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	unsigned int counter = be32_to_cpup((__be32 *)
					    (req->iv + iv_len - 4));
	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
			     AES_BLOCK_SIZE;
	/*
	 * The hardware uses only the last 32-bits as the counter while the
	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
	 * the whole IV is a counter. So fall back if the counter is going to
	 * overflow.
	 */
	if (counter + nblks < counter) {
		int ret;

		pr_debug("counter %x will overflow (nblks %u), falling back\n",
			 counter, counter + nblks);

		ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
						  ctx->key_length);
		if (ret)
			return ret;

		{
			SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

			skcipher_request_set_sync_tfm(subreq, ctx->fallback);
			skcipher_request_set_callback(subreq, req->base.flags,
						      NULL, NULL);
			skcipher_request_set_crypt(subreq, req->src, req->dst,
						   req->cryptlen, req->iv);
			ret = encrypt ? crypto_skcipher_encrypt(subreq)
				      : crypto_skcipher_decrypt(subreq);
			skcipher_request_zero(subreq);
		}
		return ret;
	}

	return encrypt ? artpec6_crypto_encrypt(req)
		       : artpec6_crypto_decrypt(req);
}

static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, true);
}

static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, false);
}

/*
 * AEAD functions
 */
static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
{
	struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);

	memset(tfm_ctx, 0, sizeof(*tfm_ctx));

	crypto_aead_set_reqsize(tfm,
				sizeof(struct artpec6_crypto_aead_req_ctx));

	return 0;
}

static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
				       unsigned int len)
{
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);

	if (len != 16 && len != 24 && len != 32)
		return -EINVAL;

	ctx->key_length = len;

	memcpy(ctx->aes_key, key, len);
	return 0;
}

static int artpec6_crypto_aead_encrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = false;
	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_aead_decrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = true;
	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}
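
/* Build the DMA descriptor lists for a hash request: optional HMAC key
 * download, metadata, restored context, the input data and, for the final
 * round, the hash padding. Returns ARTPEC6_CRYPTO_PREPARE_HASH_START when
 * the hardware should be started, ARTPEC6_CRYPTO_PREPARE_HASH_NO_START when
 * all input could be buffered, or a negative error code.
 */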
static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
{
	struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
	size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	size_t contextsize = digestsize;
	size_t blocksize = crypto_tfm_alg_blocksize(
		crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 sel_ctx;
	bool ext_ctx = false;
	bool run_hw = false;
	int error = 0;

	artpec6_crypto_init_dma_operation(common);
	/* Upload HMAC key, must be the first packet */
	if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
		if (variant == ARTPEC6_CRYPTO) {
			req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
						     a6_regk_crypto_dlkey);
		} else {
			req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
						     a7_regk_crypto_dlkey);
		}

		/* Copy and pad up the key */
		memcpy(req_ctx->key_buffer, ctx->hmac_key,
		       ctx->hmac_key_length);
		memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
		       blocksize - ctx->hmac_key_length);

		error = artpec6_crypto_setup_out_descr(common,
					(void *)&req_ctx->key_md,
					sizeof(req_ctx->key_md), false, false);
		if (error)
			return error;

		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->key_buffer, blocksize,
					true, false);
		if (error)
			return error;
	}

	if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
		/* Restore context */
		sel_ctx = regk_crypto_ext;
		ext_ctx = true;
	} else {
		sel_ctx = regk_crypto_init;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
	} else {
		req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
	}
	/* Set up metadata descriptors */
	error = artpec6_crypto_setup_out_descr(common,
				(void *)&req_ctx->hash_md,
				sizeof(req_ctx->hash_md), false, false);
	if (error)
		return error;

	error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (error)
		return error;

	if (ext_ctx) {
		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->digeststate,
					contextsize, false, false);
		if (error)
			return error;
	}

	if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
		size_t done_bytes = 0;
		size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
		size_t ready_bytes = round_down(total_bytes, blocksize);
		struct artpec6_crypto_walk walk;

		run_hw = ready_bytes > 0;
		if (req_ctx->partial_bytes && ready_bytes) {
			/* We have a partial buffer and will send at least
			 * some bytes to the HW. Empty this partial buffer
			 * before tackling the SG lists
			 */
  1176. memcpy(req_ctx->partial_buffer_out,
  1177. req_ctx->partial_buffer,
  1178. req_ctx->partial_bytes);
  1179. error = artpec6_crypto_setup_out_descr(common,
  1180. req_ctx->partial_buffer_out,
  1181. req_ctx->partial_bytes,
  1182. false, true);
  1183. if (error)
  1184. return error;
  1185. /* Reset partial buffer */
  1186. done_bytes += req_ctx->partial_bytes;
  1187. req_ctx->partial_bytes = 0;
  1188. }
  1189. artpec6_crypto_walk_init(&walk, areq->src);
  1190. error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
  1191. ready_bytes -
  1192. done_bytes);
  1193. if (error)
  1194. return error;
  1195. if (walk.sg) {
  1196. size_t sg_skip = ready_bytes - done_bytes;
  1197. size_t sg_rem = areq->nbytes - sg_skip;
  1198. sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
  1199. req_ctx->partial_buffer +
  1200. req_ctx->partial_bytes,
  1201. sg_rem, sg_skip);
  1202. req_ctx->partial_bytes += sg_rem;
  1203. }
  1204. req_ctx->digcnt += ready_bytes;
  1205. req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
  1206. }
  1207. /* Finalize */
  1208. if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
  1209. size_t hash_pad_len;
  1210. u64 digest_bits;
  1211. u32 oper;
  1212. if (variant == ARTPEC6_CRYPTO)
  1213. oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
  1214. else
  1215. oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);
  1216. /* Write out the partial buffer if present */
  1217. if (req_ctx->partial_bytes) {
  1218. memcpy(req_ctx->partial_buffer_out,
  1219. req_ctx->partial_buffer,
  1220. req_ctx->partial_bytes);
  1221. error = artpec6_crypto_setup_out_descr(common,
  1222. req_ctx->partial_buffer_out,
  1223. req_ctx->partial_bytes,
  1224. false, true);
  1225. if (error)
  1226. return error;
  1227. req_ctx->digcnt += req_ctx->partial_bytes;
  1228. req_ctx->partial_bytes = 0;
  1229. }
  1230. if (req_ctx->hash_flags & HASH_FLAG_HMAC)
  1231. digest_bits = 8 * (req_ctx->digcnt + blocksize);
  1232. else
  1233. digest_bits = 8 * req_ctx->digcnt;
  1234. /* Add the hash pad */
  1235. hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
  1236. req_ctx->digcnt, digest_bits);
  1237. error = artpec6_crypto_setup_out_descr(common,
  1238. req_ctx->pad_buffer,
  1239. hash_pad_len, false,
  1240. true);
  1241. req_ctx->digcnt = 0;
  1242. if (error)
  1243. return error;
  1244. /* Descriptor for the final result */
  1245. error = artpec6_crypto_setup_in_descr(common, areq->result,
  1246. digestsize,
  1247. true);
  1248. if (error)
  1249. return error;
  1250. } else { /* This is not the final operation for this request */
  1251. if (!run_hw)
  1252. return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;
  1253. /* Save the result to the context */
  1254. error = artpec6_crypto_setup_in_descr(common,
  1255. req_ctx->digeststate,
  1256. contextsize, false);
  1257. if (error)
  1258. return error;
  1259. /* fall through */
  1260. }
  1261. req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
  1262. HASH_FLAG_FINALIZE);
  1263. error = artpec6_crypto_terminate_in_descrs(common);
  1264. if (error)
  1265. return error;
  1266. error = artpec6_crypto_terminate_out_descrs(common);
  1267. if (error)
  1268. return error;
  1269. error = artpec6_crypto_dma_map_descs(common);
  1270. if (error)
  1271. return error;
  1272. return ARTPEC6_CRYPTO_PREPARE_HASH_START;
  1273. }
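/* skcipher transform init/exit helpers: set the request context size
* and record which cipher mode artpec6_crypto_prepare_crypto() should
* program into the hardware.
*/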
  1274. static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
  1275. {
  1276. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1277. tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
  1278. ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;
  1279. return 0;
  1280. }
  1281. static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
  1282. {
  1283. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1284. ctx->fallback =
  1285. crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
  1286. 0, CRYPTO_ALG_NEED_FALLBACK);
  1287. if (IS_ERR(ctx->fallback))
  1288. return PTR_ERR(ctx->fallback);
  1289. tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
  1290. ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;
  1291. return 0;
  1292. }
  1293. static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
  1294. {
  1295. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1296. tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
  1297. ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;
  1298. return 0;
  1299. }
  1300. static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
  1301. {
  1302. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1303. tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
  1304. ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;
  1305. return 0;
  1306. }
  1307. static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
  1308. {
  1309. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1310. memset(ctx, 0, sizeof(*ctx));
  1311. }
  1312. static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
  1313. {
  1314. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1315. crypto_free_sync_skcipher(ctx->fallback);
  1316. artpec6_crypto_aes_exit(tfm);
  1317. }
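/* Store the AES key in the transform context; it is pushed to the
* hardware as part of each prepared request. Only 128-, 192- and
* 256-bit keys (doubled for XTS) are accepted.
*/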
  1318. static int
  1319. artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
  1320. unsigned int keylen)
  1321. {
  1322. struct artpec6_cryptotfm_context *ctx =
  1323. crypto_skcipher_ctx(cipher);
  1324. switch (keylen) {
  1325. case 16:
  1326. case 24:
  1327. case 32:
  1328. break;
  1329. default:
  1330. return -EINVAL;
  1331. }
  1332. memcpy(ctx->aes_key, key, keylen);
  1333. ctx->key_length = keylen;
  1334. return 0;
  1335. }
  1336. static int
  1337. artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
  1338. unsigned int keylen)
  1339. {
  1340. struct artpec6_cryptotfm_context *ctx =
  1341. crypto_skcipher_ctx(cipher);
  1342. int ret;
  1343. ret = xts_check_key(&cipher->base, key, keylen);
  1344. if (ret)
  1345. return ret;
  1346. switch (keylen) {
  1347. case 32:
  1348. case 48:
  1349. case 64:
  1350. break;
  1351. default:
  1352. return -EINVAL;
  1353. }
  1354. memcpy(ctx->aes_key, key, keylen);
  1355. ctx->key_length = keylen;
  1356. return 0;
  1357. }
/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
*
* @areq: The asynchronous request to process
*
* @return 0 if the dma job was successfully prepared
* <0 on error
*
* This function sets up the PDMA descriptors for a block cipher request.
*
* The required padding is added for AES-CTR using a statically defined
* buffer.
*
* The PDMA descriptor list will be as follows:
*
* OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
* IN: <CIPHER_MD><data_0>...[data_n]<intr>
*
*/
  1376. static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
  1377. {
  1378. int ret;
  1379. struct artpec6_crypto_walk walk;
  1380. struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
  1381. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
  1382. struct artpec6_crypto_request_context *req_ctx = NULL;
  1383. size_t iv_len = crypto_skcipher_ivsize(cipher);
  1384. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  1385. enum artpec6_crypto_variant variant = ac->variant;
  1386. struct artpec6_crypto_req_common *common;
  1387. bool cipher_decr = false;
  1388. size_t cipher_klen;
  1389. u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
  1390. u32 oper;
  1391. req_ctx = skcipher_request_ctx(areq);
  1392. common = &req_ctx->common;
  1393. artpec6_crypto_init_dma_operation(common);
  1394. if (variant == ARTPEC6_CRYPTO)
  1395. ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
  1396. else
  1397. ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);
  1398. ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
  1399. sizeof(ctx->key_md), false, false);
  1400. if (ret)
  1401. return ret;
  1402. ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
  1403. ctx->key_length, true, false);
  1404. if (ret)
  1405. return ret;
  1406. req_ctx->cipher_md = 0;
  1407. if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
  1408. cipher_klen = ctx->key_length/2;
  1409. else
  1410. cipher_klen = ctx->key_length;
  1411. /* Metadata */
  1412. switch (cipher_klen) {
  1413. case 16:
  1414. cipher_len = regk_crypto_key_128;
  1415. break;
  1416. case 24:
  1417. cipher_len = regk_crypto_key_192;
  1418. break;
  1419. case 32:
  1420. cipher_len = regk_crypto_key_256;
  1421. break;
  1422. default:
  1423. pr_err("%s: Invalid key length %zu!\n",
  1424. MODULE_NAME, ctx->key_length);
  1425. return -EINVAL;
  1426. }
  1427. switch (ctx->crypto_type) {
  1428. case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
  1429. oper = regk_crypto_aes_ecb;
  1430. cipher_decr = req_ctx->decrypt;
  1431. break;
  1432. case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
  1433. oper = regk_crypto_aes_cbc;
  1434. cipher_decr = req_ctx->decrypt;
  1435. break;
  1436. case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
  1437. oper = regk_crypto_aes_ctr;
  1438. cipher_decr = false;
  1439. break;
  1440. case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
  1441. oper = regk_crypto_aes_xts;
  1442. cipher_decr = req_ctx->decrypt;
  1443. if (variant == ARTPEC6_CRYPTO)
  1444. req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
  1445. else
  1446. req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
  1447. break;
  1448. default:
  1449. pr_err("%s: Invalid cipher mode %d!\n",
  1450. MODULE_NAME, ctx->crypto_type);
  1451. return -EINVAL;
  1452. }
  1453. if (variant == ARTPEC6_CRYPTO) {
  1454. req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
  1455. req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
  1456. cipher_len);
  1457. if (cipher_decr)
  1458. req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
  1459. } else {
  1460. req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
  1461. req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
  1462. cipher_len);
  1463. if (cipher_decr)
  1464. req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
  1465. }
  1466. ret = artpec6_crypto_setup_out_descr(common,
  1467. &req_ctx->cipher_md,
  1468. sizeof(req_ctx->cipher_md),
  1469. false, false);
  1470. if (ret)
  1471. return ret;
  1472. ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
  1473. if (ret)
  1474. return ret;
  1475. if (iv_len) {
  1476. ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
  1477. false, false);
  1478. if (ret)
  1479. return ret;
  1480. }
  1481. /* Data out */
  1482. artpec6_crypto_walk_init(&walk, areq->src);
  1483. ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
  1484. if (ret)
  1485. return ret;
  1486. /* Data in */
  1487. artpec6_crypto_walk_init(&walk, areq->dst);
  1488. ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
  1489. if (ret)
  1490. return ret;
/* CTR- and XTS-mode padding required by the HW. */
  1492. if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
  1493. ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
  1494. size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
  1495. areq->cryptlen;
  1496. if (pad) {
  1497. ret = artpec6_crypto_setup_out_descr(common,
  1498. ac->pad_buffer,
  1499. pad, false, false);
  1500. if (ret)
  1501. return ret;
  1502. ret = artpec6_crypto_setup_in_descr(common,
  1503. ac->pad_buffer, pad,
  1504. false);
  1505. if (ret)
  1506. return ret;
  1507. }
  1508. }
  1509. ret = artpec6_crypto_terminate_out_descrs(common);
  1510. if (ret)
  1511. return ret;
  1512. ret = artpec6_crypto_terminate_in_descrs(common);
  1513. if (ret)
  1514. return ret;
  1515. return artpec6_crypto_dma_map_descs(common);
  1516. }
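/** artpec6_crypto_prepare_aead - Prepare an AES-GCM AEAD request
*
* @areq: The AEAD request to process
*
* @return 0 if the dma job was successfully prepared
* <0 on error
*
* Sets up the PDMA descriptors for an AES-GCM request: key and cipher
* metadata, the GCM context (AAD/text lengths and J0), the associated
* data and the text, each zero padded to a 16-byte boundary as the HW
* requires, and finally the authentication tag.
*/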
  1517. static int artpec6_crypto_prepare_aead(struct aead_request *areq)
  1518. {
  1519. size_t count;
  1520. int ret;
  1521. size_t input_length;
  1522. struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
  1523. struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
  1524. struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
  1525. struct artpec6_crypto_req_common *common = &req_ctx->common;
  1526. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  1527. enum artpec6_crypto_variant variant = ac->variant;
  1528. u32 md_cipher_len;
  1529. artpec6_crypto_init_dma_operation(common);
  1530. /* Key */
  1531. if (variant == ARTPEC6_CRYPTO) {
  1532. ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
  1533. a6_regk_crypto_dlkey);
  1534. } else {
  1535. ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
  1536. a7_regk_crypto_dlkey);
  1537. }
  1538. ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
  1539. sizeof(ctx->key_md), false, false);
  1540. if (ret)
  1541. return ret;
  1542. ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
  1543. ctx->key_length, true, false);
  1544. if (ret)
  1545. return ret;
  1546. req_ctx->cipher_md = 0;
  1547. switch (ctx->key_length) {
  1548. case 16:
  1549. md_cipher_len = regk_crypto_key_128;
  1550. break;
  1551. case 24:
  1552. md_cipher_len = regk_crypto_key_192;
  1553. break;
  1554. case 32:
  1555. md_cipher_len = regk_crypto_key_256;
  1556. break;
  1557. default:
  1558. return -EINVAL;
  1559. }
  1560. if (variant == ARTPEC6_CRYPTO) {
  1561. req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
  1562. regk_crypto_aes_gcm);
  1563. req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
  1564. md_cipher_len);
  1565. if (req_ctx->decrypt)
  1566. req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
  1567. } else {
  1568. req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
  1569. regk_crypto_aes_gcm);
  1570. req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
  1571. md_cipher_len);
  1572. if (req_ctx->decrypt)
  1573. req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
  1574. }
  1575. ret = artpec6_crypto_setup_out_descr(common,
  1576. (void *) &req_ctx->cipher_md,
  1577. sizeof(req_ctx->cipher_md), false,
  1578. false);
  1579. if (ret)
  1580. return ret;
  1581. ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
  1582. if (ret)
  1583. return ret;
  1584. /* For the decryption, cryptlen includes the tag. */
  1585. input_length = areq->cryptlen;
  1586. if (req_ctx->decrypt)
  1587. input_length -= crypto_aead_authsize(cipher);
  1588. /* Prepare the context buffer */
  1589. req_ctx->hw_ctx.aad_length_bits =
  1590. __cpu_to_be64(8*areq->assoclen);
  1591. req_ctx->hw_ctx.text_length_bits =
  1592. __cpu_to_be64(8*input_length);
  1593. memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
  1594. // The HW omits the initial increment of the counter field.
  1595. memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
  1596. ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
  1597. sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
  1598. if (ret)
  1599. return ret;
  1600. {
  1601. struct artpec6_crypto_walk walk;
  1602. artpec6_crypto_walk_init(&walk, areq->src);
  1603. /* Associated data */
  1604. count = areq->assoclen;
  1605. ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
  1606. if (ret)
  1607. return ret;
  1608. if (!IS_ALIGNED(areq->assoclen, 16)) {
  1609. size_t assoc_pad = 16 - (areq->assoclen % 16);
  1610. /* The HW mandates zero padding here */
  1611. ret = artpec6_crypto_setup_out_descr(common,
  1612. ac->zero_buffer,
  1613. assoc_pad, false,
  1614. false);
  1615. if (ret)
  1616. return ret;
  1617. }
  1618. /* Data to crypto */
  1619. count = input_length;
  1620. ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
  1621. if (ret)
  1622. return ret;
  1623. if (!IS_ALIGNED(input_length, 16)) {
  1624. size_t crypto_pad = 16 - (input_length % 16);
  1625. /* The HW mandates zero padding here */
  1626. ret = artpec6_crypto_setup_out_descr(common,
  1627. ac->zero_buffer,
  1628. crypto_pad,
  1629. false,
  1630. false);
  1631. if (ret)
  1632. return ret;
  1633. }
  1634. }
  1635. /* Data from crypto */
  1636. {
  1637. struct artpec6_crypto_walk walk;
  1638. size_t output_len = areq->cryptlen;
  1639. if (req_ctx->decrypt)
  1640. output_len -= crypto_aead_authsize(cipher);
  1641. artpec6_crypto_walk_init(&walk, areq->dst);
  1642. /* skip associated data in the output */
  1643. count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
  1644. if (count)
  1645. return -EINVAL;
  1646. count = output_len;
  1647. ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
  1648. if (ret)
  1649. return ret;
  1650. /* Put padding between the cryptotext and the auth tag */
  1651. if (!IS_ALIGNED(output_len, 16)) {
  1652. size_t crypto_pad = 16 - (output_len % 16);
  1653. ret = artpec6_crypto_setup_in_descr(common,
  1654. ac->pad_buffer,
  1655. crypto_pad, false);
  1656. if (ret)
  1657. return ret;
  1658. }
/* The authentication tag shall follow immediately after
* the output ciphertext. For decryption it is put in a context
* buffer for later comparison against the input tag.
*/
  1663. if (req_ctx->decrypt) {
  1664. ret = artpec6_crypto_setup_in_descr(common,
  1665. req_ctx->decryption_tag, AES_BLOCK_SIZE, false);
  1666. if (ret)
  1667. return ret;
  1668. } else {
  1669. /* For encryption the requested tag size may be smaller
  1670. * than the hardware's generated tag.
  1671. */
  1672. size_t authsize = crypto_aead_authsize(cipher);
  1673. ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
  1674. authsize);
  1675. if (ret)
  1676. return ret;
  1677. if (authsize < AES_BLOCK_SIZE) {
  1678. count = AES_BLOCK_SIZE - authsize;
  1679. ret = artpec6_crypto_setup_in_descr(common,
  1680. ac->pad_buffer,
  1681. count, false);
  1682. if (ret)
  1683. return ret;
  1684. }
  1685. }
  1686. }
  1687. ret = artpec6_crypto_terminate_in_descrs(common);
  1688. if (ret)
  1689. return ret;
  1690. ret = artpec6_crypto_terminate_out_descrs(common);
  1691. if (ret)
  1692. return ret;
  1693. return artpec6_crypto_dma_map_descs(common);
  1694. }
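/* Move queued requests over to the pending list and start their DMA
* jobs for as long as the hardware accepts new work. Newly started
* requests are collected on @completions so the caller can notify
* them with -EINPROGRESS outside the queue lock.
*/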
  1695. static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
  1696. struct list_head *completions)
  1697. {
  1698. struct artpec6_crypto_req_common *req;
  1699. while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
  1700. req = list_first_entry(&ac->queue,
  1701. struct artpec6_crypto_req_common,
  1702. list);
  1703. list_move_tail(&req->list, &ac->pending);
  1704. artpec6_crypto_start_dma(req);
  1705. list_add_tail(&req->complete_in_progress, completions);
  1706. }
/*
* In some cases, the hardware can raise an in_eop_flush interrupt
* before actually updating the status, so we have a timer which will
* recheck the status on timeout. Since these cases are expected to be
* very rare, we use a relatively large timeout value. There should be
* no noticeable negative effect if we time out spuriously.
*/
  1714. if (ac->pending_count)
  1715. mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
  1716. else
  1717. del_timer(&ac->timer);
  1718. }
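/* Timer callback: the status descriptor may not have been written yet
* when the interrupt fired, so re-run the tasklet to recheck.
*/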
  1719. static void artpec6_crypto_timeout(struct timer_list *t)
  1720. {
  1721. struct artpec6_crypto *ac = from_timer(ac, t, timer);
  1722. dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
  1723. tasklet_schedule(&ac->task);
  1724. }
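/* Tasklet: complete all pending jobs whose final status descriptor has
* been written, in submission order, then start any queued jobs. The
* completion callbacks are invoked without holding the queue lock.
*/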
  1725. static void artpec6_crypto_task(unsigned long data)
  1726. {
  1727. struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
  1728. struct artpec6_crypto_req_common *req;
  1729. struct artpec6_crypto_req_common *n;
  1730. struct list_head complete_done;
  1731. struct list_head complete_in_progress;
  1732. INIT_LIST_HEAD(&complete_done);
  1733. INIT_LIST_HEAD(&complete_in_progress);
  1734. if (list_empty(&ac->pending)) {
  1735. pr_debug("Spurious IRQ\n");
  1736. return;
  1737. }
  1738. spin_lock(&ac->queue_lock);
  1739. list_for_each_entry_safe(req, n, &ac->pending, list) {
  1740. struct artpec6_crypto_dma_descriptors *dma = req->dma;
  1741. u32 stat;
  1742. dma_addr_t stataddr;
  1743. stataddr = dma->stat_dma_addr + 4 * (req->dma->in_cnt - 1);
  1744. dma_sync_single_for_cpu(artpec6_crypto_dev,
  1745. stataddr,
  1746. 4,
  1747. DMA_BIDIRECTIONAL);
  1748. stat = req->dma->stat[req->dma->in_cnt-1];
  1749. /* A non-zero final status descriptor indicates
  1750. * this job has finished.
  1751. */
  1752. pr_debug("Request %p status is %X\n", req, stat);
  1753. if (!stat)
  1754. break;
  1755. /* Allow testing of timeout handling with fault injection */
  1756. #ifdef CONFIG_FAULT_INJECTION
  1757. if (should_fail(&artpec6_crypto_fail_status_read, 1))
  1758. continue;
  1759. #endif
  1760. pr_debug("Completing request %p\n", req);
  1761. list_move_tail(&req->list, &complete_done);
  1762. ac->pending_count--;
  1763. }
  1764. artpec6_crypto_process_queue(ac, &complete_in_progress);
  1765. spin_unlock(&ac->queue_lock);
  1766. /* Perform the completion callbacks without holding the queue lock
  1767. * to allow new request submissions from the callbacks.
  1768. */
  1769. list_for_each_entry_safe(req, n, &complete_done, list) {
  1770. artpec6_crypto_dma_unmap_all(req);
  1771. artpec6_crypto_copy_bounce_buffers(req);
  1772. artpec6_crypto_common_destroy(req);
  1773. req->complete(req->req);
  1774. }
  1775. list_for_each_entry_safe(req, n, &complete_in_progress,
  1776. complete_in_progress) {
  1777. req->req->complete(req->req, -EINPROGRESS);
  1778. }
  1779. }
  1780. static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
  1781. {
  1782. req->complete(req, 0);
  1783. }
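/* For CBC the chaining value for the next request is the last
* ciphertext block; copy it back into the request IV on completion.
*/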
  1784. static void
  1785. artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
  1786. {
  1787. struct skcipher_request *cipher_req = container_of(req,
  1788. struct skcipher_request, base);
  1789. scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
  1790. cipher_req->cryptlen - AES_BLOCK_SIZE,
  1791. AES_BLOCK_SIZE, 0);
  1792. req->complete(req, 0);
  1793. }
  1794. static void
  1795. artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
  1796. {
  1797. struct skcipher_request *cipher_req = container_of(req,
  1798. struct skcipher_request, base);
  1799. scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
  1800. cipher_req->cryptlen - AES_BLOCK_SIZE,
  1801. AES_BLOCK_SIZE, 0);
  1802. req->complete(req, 0);
  1803. }
  1804. static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
  1805. {
  1806. int result = 0;
  1807. /* Verify GCM hashtag. */
  1808. struct aead_request *areq = container_of(req,
  1809. struct aead_request, base);
  1810. struct crypto_aead *aead = crypto_aead_reqtfm(areq);
  1811. struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
  1812. if (req_ctx->decrypt) {
  1813. u8 input_tag[AES_BLOCK_SIZE];
  1814. unsigned int authsize = crypto_aead_authsize(aead);
  1815. sg_pcopy_to_buffer(areq->src,
  1816. sg_nents(areq->src),
  1817. input_tag,
  1818. authsize,
  1819. areq->assoclen + areq->cryptlen -
  1820. authsize);
  1821. if (crypto_memneq(req_ctx->decryption_tag,
  1822. input_tag,
  1823. authsize)) {
  1824. pr_debug("***EBADMSG:\n");
  1825. print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
  1826. input_tag, authsize, true);
  1827. print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
  1828. req_ctx->decryption_tag,
  1829. authsize, true);
  1830. result = -EBADMSG;
  1831. }
  1832. }
  1833. req->complete(req, result);
  1834. }
  1835. static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
  1836. {
  1837. req->complete(req, 0);
  1838. }
  1839. /*------------------- Hash functions -----------------------------------------*/
  1840. static int
  1841. artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
  1842. const u8 *key, unsigned int keylen)
  1843. {
  1844. struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
  1845. size_t blocksize;
  1846. int ret;
  1847. if (!keylen) {
  1848. pr_err("Invalid length (%d) of HMAC key\n",
  1849. keylen);
  1850. return -EINVAL;
  1851. }
  1852. memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
  1853. blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
  1854. if (keylen > blocksize) {
  1855. tfm_ctx->hmac_key_length = blocksize;
  1856. ret = crypto_shash_tfm_digest(tfm_ctx->child_hash, key, keylen,
  1857. tfm_ctx->hmac_key);
  1858. if (ret)
  1859. return ret;
  1860. } else {
  1861. memcpy(tfm_ctx->hmac_key, key, keylen);
  1862. tfm_ctx->hmac_key_length = keylen;
  1863. }
  1864. return 0;
  1865. }
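/* Initialise the hash request context for SHA-1 or SHA-256, optionally
* as HMAC, and select the corresponding hardware operation code.
*/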
  1866. static int
  1867. artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
  1868. {
  1869. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  1870. enum artpec6_crypto_variant variant = ac->variant;
  1871. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1872. u32 oper;
  1873. memset(req_ctx, 0, sizeof(*req_ctx));
  1874. req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
  1875. if (hmac)
  1876. req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);
  1877. switch (type) {
  1878. case ARTPEC6_CRYPTO_HASH_SHA1:
  1879. oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
  1880. break;
  1881. case ARTPEC6_CRYPTO_HASH_SHA256:
  1882. oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
  1883. break;
  1884. default:
  1885. pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
  1886. return -EINVAL;
  1887. }
  1888. if (variant == ARTPEC6_CRYPTO)
  1889. req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
  1890. else
  1891. req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);
  1892. return 0;
  1893. }
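/* Allocate the DMA state if needed, build the descriptor lists and
* submit the job. Requests with nothing to send to the hardware are
* torn down again and return 0 without starting the hardware.
*/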
  1894. static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
  1895. {
  1896. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1897. int ret;
  1898. if (!req_ctx->common.dma) {
  1899. ret = artpec6_crypto_common_init(&req_ctx->common,
  1900. &req->base,
  1901. artpec6_crypto_complete_hash,
  1902. NULL, 0);
  1903. if (ret)
  1904. return ret;
  1905. }
  1906. ret = artpec6_crypto_prepare_hash(req);
  1907. switch (ret) {
  1908. case ARTPEC6_CRYPTO_PREPARE_HASH_START:
  1909. ret = artpec6_crypto_submit(&req_ctx->common);
  1910. break;
  1911. case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
  1912. ret = 0;
  1913. fallthrough;
  1914. default:
  1915. artpec6_crypto_common_destroy(&req_ctx->common);
  1916. break;
  1917. }
  1918. return ret;
  1919. }
  1920. static int artpec6_crypto_hash_final(struct ahash_request *req)
  1921. {
  1922. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1923. req_ctx->hash_flags |= HASH_FLAG_FINALIZE;
  1924. return artpec6_crypto_prepare_submit_hash(req);
  1925. }
  1926. static int artpec6_crypto_hash_update(struct ahash_request *req)
  1927. {
  1928. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1929. req_ctx->hash_flags |= HASH_FLAG_UPDATE;
  1930. return artpec6_crypto_prepare_submit_hash(req);
  1931. }
  1932. static int artpec6_crypto_sha1_init(struct ahash_request *req)
  1933. {
  1934. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
  1935. }
  1936. static int artpec6_crypto_sha1_digest(struct ahash_request *req)
  1937. {
  1938. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1939. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
  1940. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  1941. return artpec6_crypto_prepare_submit_hash(req);
  1942. }
  1943. static int artpec6_crypto_sha256_init(struct ahash_request *req)
  1944. {
  1945. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
  1946. }
  1947. static int artpec6_crypto_sha256_digest(struct ahash_request *req)
  1948. {
  1949. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1950. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
  1951. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  1952. return artpec6_crypto_prepare_submit_hash(req);
  1953. }
  1954. static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
  1955. {
  1956. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
  1957. }
  1958. static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
  1959. {
  1960. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1961. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
  1962. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  1963. return artpec6_crypto_prepare_submit_hash(req);
  1964. }
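/* Common ahash init: set the request context size and, for HMAC,
* allocate a software shash used to pre-hash keys longer than the
* block size.
*/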
  1965. static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
  1966. const char *base_hash_name)
  1967. {
  1968. struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
  1969. crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  1970. sizeof(struct artpec6_hash_request_context));
  1971. memset(tfm_ctx, 0, sizeof(*tfm_ctx));
  1972. if (base_hash_name) {
  1973. struct crypto_shash *child;
  1974. child = crypto_alloc_shash(base_hash_name, 0,
  1975. CRYPTO_ALG_NEED_FALLBACK);
  1976. if (IS_ERR(child))
  1977. return PTR_ERR(child);
  1978. tfm_ctx->child_hash = child;
  1979. }
  1980. return 0;
  1981. }
  1982. static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
  1983. {
  1984. return artpec6_crypto_ahash_init_common(tfm, NULL);
  1985. }
  1986. static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
  1987. {
  1988. return artpec6_crypto_ahash_init_common(tfm, "sha256");
  1989. }
  1990. static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
  1991. {
  1992. struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
  1993. if (tfm_ctx->child_hash)
  1994. crypto_free_shash(tfm_ctx->child_hash);
  1995. memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
  1996. tfm_ctx->hmac_key_length = 0;
  1997. }
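/* Export/import the partial hash state (counters, partial block and
* hardware digest state) so a hash operation can be suspended and
* resumed later.
*/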
  1998. static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
  1999. {
  2000. const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
  2001. struct artpec6_hash_export_state *state = out;
  2002. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  2003. enum artpec6_crypto_variant variant = ac->variant;
  2004. BUILD_BUG_ON(sizeof(state->partial_buffer) !=
  2005. sizeof(ctx->partial_buffer));
  2006. BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));
  2007. state->digcnt = ctx->digcnt;
  2008. state->partial_bytes = ctx->partial_bytes;
  2009. state->hash_flags = ctx->hash_flags;
  2010. if (variant == ARTPEC6_CRYPTO)
  2011. state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
  2012. else
  2013. state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);
  2014. memcpy(state->partial_buffer, ctx->partial_buffer,
  2015. sizeof(state->partial_buffer));
  2016. memcpy(state->digeststate, ctx->digeststate,
  2017. sizeof(state->digeststate));
  2018. return 0;
  2019. }
  2020. static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
  2021. {
  2022. struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
  2023. const struct artpec6_hash_export_state *state = in;
  2024. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  2025. enum artpec6_crypto_variant variant = ac->variant;
  2026. memset(ctx, 0, sizeof(*ctx));
  2027. ctx->digcnt = state->digcnt;
  2028. ctx->partial_bytes = state->partial_bytes;
  2029. ctx->hash_flags = state->hash_flags;
  2030. if (variant == ARTPEC6_CRYPTO)
  2031. ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
  2032. else
  2033. ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);
  2034. memcpy(ctx->partial_buffer, state->partial_buffer,
  2035. sizeof(state->partial_buffer));
  2036. memcpy(ctx->digeststate, state->digeststate,
  2037. sizeof(state->digeststate));
  2038. return 0;
  2039. }
  2040. static int init_crypto_hw(struct artpec6_crypto *ac)
  2041. {
  2042. enum artpec6_crypto_variant variant = ac->variant;
  2043. void __iomem *base = ac->base;
  2044. u32 out_descr_buf_size;
  2045. u32 out_data_buf_size;
  2046. u32 in_data_buf_size;
  2047. u32 in_descr_buf_size;
  2048. u32 in_stat_buf_size;
  2049. u32 in, out;
/*
* The PDMA unit contains 1984 bytes of internal memory for the OUT
* channels and 1024 bytes for the IN channel. This is an elastic
* memory used to internally store the descriptors and data. The values
* are specified in 64-byte increments. TrustZone buffers are not
* used at this stage.
*/
  2057. out_data_buf_size = 16; /* 1024 bytes for data */
  2058. out_descr_buf_size = 15; /* 960 bytes for descriptors */
  2059. in_data_buf_size = 8; /* 512 bytes for data */
  2060. in_descr_buf_size = 4; /* 256 bytes for descriptors */
  2061. in_stat_buf_size = 4; /* 256 bytes for stat descrs */
  2062. BUILD_BUG_ON_MSG((out_data_buf_size
  2063. + out_descr_buf_size) * 64 > 1984,
  2064. "Invalid OUT configuration");
  2065. BUILD_BUG_ON_MSG((in_data_buf_size
  2066. + in_descr_buf_size
  2067. + in_stat_buf_size) * 64 > 1024,
  2068. "Invalid IN configuration");
  2069. in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
  2070. FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
  2071. FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);
  2072. out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
  2073. FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);
  2074. writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
  2075. writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);
  2076. if (variant == ARTPEC6_CRYPTO) {
  2077. writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
  2078. writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
  2079. writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
  2080. A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
  2081. base + A6_PDMA_INTR_MASK);
  2082. } else {
  2083. writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
  2084. writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
  2085. writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
  2086. A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
  2087. base + A7_PDMA_INTR_MASK);
  2088. }
  2089. return 0;
  2090. }
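/* Stop both PDMA directions and disable the interface. */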
  2091. static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
  2092. {
  2093. enum artpec6_crypto_variant variant = ac->variant;
  2094. void __iomem *base = ac->base;
  2095. if (variant == ARTPEC6_CRYPTO) {
  2096. writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
  2097. writel_relaxed(0, base + A6_PDMA_IN_CFG);
  2098. writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
  2099. } else {
  2100. writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
  2101. writel_relaxed(0, base + A7_PDMA_IN_CFG);
  2102. writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
  2103. }
  2104. writel_relaxed(0, base + PDMA_OUT_CFG);
  2105. }
  2106. static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
  2107. {
  2108. struct artpec6_crypto *ac = dev_id;
  2109. enum artpec6_crypto_variant variant = ac->variant;
  2110. void __iomem *base = ac->base;
  2111. u32 mask_in_data, mask_in_eop_flush;
  2112. u32 in_cmd_flush_stat, in_cmd_reg;
  2113. u32 ack_intr_reg;
  2114. u32 ack = 0;
  2115. u32 intr;
  2116. if (variant == ARTPEC6_CRYPTO) {
  2117. intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
  2118. mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
  2119. mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
  2120. in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
  2121. in_cmd_reg = A6_PDMA_IN_CMD;
  2122. ack_intr_reg = A6_PDMA_ACK_INTR;
  2123. } else {
  2124. intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
  2125. mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
  2126. mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
  2127. in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
  2128. in_cmd_reg = A7_PDMA_IN_CMD;
  2129. ack_intr_reg = A7_PDMA_ACK_INTR;
  2130. }
/* We get two interrupt notifications from each job.
* The in_data interrupt means all data has been written to memory,
* after which we request a status flush command to write the per-job
* status to its status vector. This ensures that the
* tasklet can detect exactly how many submitted jobs
* have finished.
*/
  2138. if (intr & mask_in_data)
  2139. ack |= mask_in_data;
  2140. if (intr & mask_in_eop_flush)
  2141. ack |= mask_in_eop_flush;
  2142. else
  2143. writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);
  2144. writel_relaxed(ack, base + ack_intr_reg);
  2145. if (intr & mask_in_eop_flush)
  2146. tasklet_schedule(&ac->task);
  2147. return IRQ_HANDLED;
  2148. }
  2149. /*------------------- Algorithm definitions ----------------------------------*/
  2150. /* Hashes */
  2151. static struct ahash_alg hash_algos[] = {
  2152. /* SHA-1 */
  2153. {
  2154. .init = artpec6_crypto_sha1_init,
  2155. .update = artpec6_crypto_hash_update,
  2156. .final = artpec6_crypto_hash_final,
  2157. .digest = artpec6_crypto_sha1_digest,
  2158. .import = artpec6_crypto_hash_import,
  2159. .export = artpec6_crypto_hash_export,
  2160. .halg.digestsize = SHA1_DIGEST_SIZE,
  2161. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2162. .halg.base = {
  2163. .cra_name = "sha1",
  2164. .cra_driver_name = "artpec-sha1",
  2165. .cra_priority = 300,
  2166. .cra_flags = CRYPTO_ALG_ASYNC |
  2167. CRYPTO_ALG_ALLOCATES_MEMORY,
  2168. .cra_blocksize = SHA1_BLOCK_SIZE,
  2169. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2170. .cra_alignmask = 3,
  2171. .cra_module = THIS_MODULE,
  2172. .cra_init = artpec6_crypto_ahash_init,
  2173. .cra_exit = artpec6_crypto_ahash_exit,
  2174. }
  2175. },
  2176. /* SHA-256 */
  2177. {
  2178. .init = artpec6_crypto_sha256_init,
  2179. .update = artpec6_crypto_hash_update,
  2180. .final = artpec6_crypto_hash_final,
  2181. .digest = artpec6_crypto_sha256_digest,
  2182. .import = artpec6_crypto_hash_import,
  2183. .export = artpec6_crypto_hash_export,
  2184. .halg.digestsize = SHA256_DIGEST_SIZE,
  2185. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2186. .halg.base = {
  2187. .cra_name = "sha256",
  2188. .cra_driver_name = "artpec-sha256",
  2189. .cra_priority = 300,
  2190. .cra_flags = CRYPTO_ALG_ASYNC |
  2191. CRYPTO_ALG_ALLOCATES_MEMORY,
  2192. .cra_blocksize = SHA256_BLOCK_SIZE,
  2193. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2194. .cra_alignmask = 3,
  2195. .cra_module = THIS_MODULE,
  2196. .cra_init = artpec6_crypto_ahash_init,
  2197. .cra_exit = artpec6_crypto_ahash_exit,
  2198. }
  2199. },
  2200. /* HMAC SHA-256 */
  2201. {
  2202. .init = artpec6_crypto_hmac_sha256_init,
  2203. .update = artpec6_crypto_hash_update,
  2204. .final = artpec6_crypto_hash_final,
  2205. .digest = artpec6_crypto_hmac_sha256_digest,
  2206. .import = artpec6_crypto_hash_import,
  2207. .export = artpec6_crypto_hash_export,
  2208. .setkey = artpec6_crypto_hash_set_key,
  2209. .halg.digestsize = SHA256_DIGEST_SIZE,
  2210. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2211. .halg.base = {
  2212. .cra_name = "hmac(sha256)",
  2213. .cra_driver_name = "artpec-hmac-sha256",
  2214. .cra_priority = 300,
  2215. .cra_flags = CRYPTO_ALG_ASYNC |
  2216. CRYPTO_ALG_ALLOCATES_MEMORY,
  2217. .cra_blocksize = SHA256_BLOCK_SIZE,
  2218. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2219. .cra_alignmask = 3,
  2220. .cra_module = THIS_MODULE,
  2221. .cra_init = artpec6_crypto_ahash_init_hmac_sha256,
  2222. .cra_exit = artpec6_crypto_ahash_exit,
  2223. }
  2224. },
  2225. };
  2226. /* Crypto */
  2227. static struct skcipher_alg crypto_algos[] = {
  2228. /* AES - ECB */
  2229. {
  2230. .base = {
  2231. .cra_name = "ecb(aes)",
  2232. .cra_driver_name = "artpec6-ecb-aes",
  2233. .cra_priority = 300,
  2234. .cra_flags = CRYPTO_ALG_ASYNC |
  2235. CRYPTO_ALG_ALLOCATES_MEMORY,
  2236. .cra_blocksize = AES_BLOCK_SIZE,
  2237. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2238. .cra_alignmask = 3,
  2239. .cra_module = THIS_MODULE,
  2240. },
  2241. .min_keysize = AES_MIN_KEY_SIZE,
  2242. .max_keysize = AES_MAX_KEY_SIZE,
  2243. .setkey = artpec6_crypto_cipher_set_key,
  2244. .encrypt = artpec6_crypto_encrypt,
  2245. .decrypt = artpec6_crypto_decrypt,
  2246. .init = artpec6_crypto_aes_ecb_init,
  2247. .exit = artpec6_crypto_aes_exit,
  2248. },
  2249. /* AES - CTR */
  2250. {
  2251. .base = {
  2252. .cra_name = "ctr(aes)",
  2253. .cra_driver_name = "artpec6-ctr-aes",
  2254. .cra_priority = 300,
  2255. .cra_flags = CRYPTO_ALG_ASYNC |
  2256. CRYPTO_ALG_ALLOCATES_MEMORY |
  2257. CRYPTO_ALG_NEED_FALLBACK,
  2258. .cra_blocksize = 1,
  2259. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2260. .cra_alignmask = 3,
  2261. .cra_module = THIS_MODULE,
  2262. },
  2263. .min_keysize = AES_MIN_KEY_SIZE,
  2264. .max_keysize = AES_MAX_KEY_SIZE,
  2265. .ivsize = AES_BLOCK_SIZE,
  2266. .setkey = artpec6_crypto_cipher_set_key,
  2267. .encrypt = artpec6_crypto_ctr_encrypt,
  2268. .decrypt = artpec6_crypto_ctr_decrypt,
  2269. .init = artpec6_crypto_aes_ctr_init,
  2270. .exit = artpec6_crypto_aes_ctr_exit,
  2271. },
  2272. /* AES - CBC */
  2273. {
  2274. .base = {
  2275. .cra_name = "cbc(aes)",
  2276. .cra_driver_name = "artpec6-cbc-aes",
  2277. .cra_priority = 300,
  2278. .cra_flags = CRYPTO_ALG_ASYNC |
  2279. CRYPTO_ALG_ALLOCATES_MEMORY,
  2280. .cra_blocksize = AES_BLOCK_SIZE,
  2281. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2282. .cra_alignmask = 3,
  2283. .cra_module = THIS_MODULE,
  2284. },
  2285. .min_keysize = AES_MIN_KEY_SIZE,
  2286. .max_keysize = AES_MAX_KEY_SIZE,
  2287. .ivsize = AES_BLOCK_SIZE,
  2288. .setkey = artpec6_crypto_cipher_set_key,
  2289. .encrypt = artpec6_crypto_encrypt,
  2290. .decrypt = artpec6_crypto_decrypt,
  2291. .init = artpec6_crypto_aes_cbc_init,
  2292. .exit = artpec6_crypto_aes_exit
  2293. },
  2294. /* AES - XTS */
  2295. {
  2296. .base = {
  2297. .cra_name = "xts(aes)",
  2298. .cra_driver_name = "artpec6-xts-aes",
  2299. .cra_priority = 300,
  2300. .cra_flags = CRYPTO_ALG_ASYNC |
  2301. CRYPTO_ALG_ALLOCATES_MEMORY,
  2302. .cra_blocksize = 1,
  2303. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2304. .cra_alignmask = 3,
  2305. .cra_module = THIS_MODULE,
  2306. },
  2307. .min_keysize = 2*AES_MIN_KEY_SIZE,
  2308. .max_keysize = 2*AES_MAX_KEY_SIZE,
  2309. .ivsize = 16,
  2310. .setkey = artpec6_crypto_xts_set_key,
  2311. .encrypt = artpec6_crypto_encrypt,
  2312. .decrypt = artpec6_crypto_decrypt,
  2313. .init = artpec6_crypto_aes_xts_init,
  2314. .exit = artpec6_crypto_aes_exit,
  2315. },
  2316. };
  2317. static struct aead_alg aead_algos[] = {
  2318. {
  2319. .init = artpec6_crypto_aead_init,
  2320. .setkey = artpec6_crypto_aead_set_key,
  2321. .encrypt = artpec6_crypto_aead_encrypt,
  2322. .decrypt = artpec6_crypto_aead_decrypt,
  2323. .ivsize = GCM_AES_IV_SIZE,
  2324. .maxauthsize = AES_BLOCK_SIZE,
  2325. .base = {
  2326. .cra_name = "gcm(aes)",
  2327. .cra_driver_name = "artpec-gcm-aes",
  2328. .cra_priority = 300,
  2329. .cra_flags = CRYPTO_ALG_ASYNC |
  2330. CRYPTO_ALG_ALLOCATES_MEMORY |
  2331. CRYPTO_ALG_KERN_DRIVER_ONLY,
  2332. .cra_blocksize = 1,
  2333. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2334. .cra_alignmask = 3,
  2335. .cra_module = THIS_MODULE,
  2336. },
  2337. }
  2338. };
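/* Optional debugfs entries used to exercise the fault injection hooks. */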
  2339. #ifdef CONFIG_DEBUG_FS
  2340. struct dbgfs_u32 {
  2341. char *name;
  2342. mode_t mode;
  2343. u32 *flag;
  2344. char *desc;
  2345. };
  2346. static struct dentry *dbgfs_root;
  2347. static void artpec6_crypto_init_debugfs(void)
  2348. {
  2349. dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
  2350. #ifdef CONFIG_FAULT_INJECTION
  2351. fault_create_debugfs_attr("fail_status_read", dbgfs_root,
  2352. &artpec6_crypto_fail_status_read);
  2353. fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
  2354. &artpec6_crypto_fail_dma_array_full);
  2355. #endif
  2356. }
  2357. static void artpec6_crypto_free_debugfs(void)
  2358. {
  2359. debugfs_remove_recursive(dbgfs_root);
  2360. dbgfs_root = NULL;
  2361. }
  2362. #endif
  2363. static const struct of_device_id artpec6_crypto_of_match[] = {
  2364. { .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
  2365. { .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
  2366. {}
  2367. };
  2368. MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
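/* Probe: map the registers, allocate the DMA descriptor cache and the
* aligned pad/zero buffers, initialise the hardware and register the
* supported hash, skcipher and AEAD algorithms.
*/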
  2369. static int artpec6_crypto_probe(struct platform_device *pdev)
  2370. {
  2371. const struct of_device_id *match;
  2372. enum artpec6_crypto_variant variant;
  2373. struct artpec6_crypto *ac;
  2374. struct device *dev = &pdev->dev;
  2375. void __iomem *base;
  2376. int irq;
  2377. int err;
  2378. if (artpec6_crypto_dev)
  2379. return -ENODEV;
  2380. match = of_match_node(artpec6_crypto_of_match, dev->of_node);
  2381. if (!match)
  2382. return -EINVAL;
  2383. variant = (enum artpec6_crypto_variant)match->data;
  2384. base = devm_platform_ioremap_resource(pdev, 0);
  2385. if (IS_ERR(base))
  2386. return PTR_ERR(base);
  2387. irq = platform_get_irq(pdev, 0);
  2388. if (irq < 0)
  2389. return -ENODEV;
  2390. ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
  2391. GFP_KERNEL);
  2392. if (!ac)
  2393. return -ENOMEM;
  2394. platform_set_drvdata(pdev, ac);
  2395. ac->variant = variant;
  2396. spin_lock_init(&ac->queue_lock);
  2397. INIT_LIST_HEAD(&ac->queue);
  2398. INIT_LIST_HEAD(&ac->pending);
  2399. timer_setup(&ac->timer, artpec6_crypto_timeout, 0);
  2400. ac->base = base;
  2401. ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
  2402. sizeof(struct artpec6_crypto_dma_descriptors),
  2403. 64,
  2404. 0,
  2405. NULL);
  2406. if (!ac->dma_cache)
  2407. return -ENOMEM;
  2408. #ifdef CONFIG_DEBUG_FS
  2409. artpec6_crypto_init_debugfs();
  2410. #endif
  2411. tasklet_init(&ac->task, artpec6_crypto_task,
  2412. (unsigned long)ac);
  2413. ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
  2414. GFP_KERNEL);
  2415. if (!ac->pad_buffer)
  2416. return -ENOMEM;
  2417. ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);
  2418. ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
  2419. GFP_KERNEL);
  2420. if (!ac->zero_buffer)
  2421. return -ENOMEM;
  2422. ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);
  2423. err = init_crypto_hw(ac);
  2424. if (err)
  2425. goto free_cache;
  2426. err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
  2427. "artpec6-crypto", ac);
  2428. if (err)
  2429. goto disable_hw;
  2430. artpec6_crypto_dev = &pdev->dev;
  2431. err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
  2432. if (err) {
  2433. dev_err(dev, "Failed to register ahashes\n");
  2434. goto disable_hw;
  2435. }
  2436. err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
  2437. if (err) {
  2438. dev_err(dev, "Failed to register ciphers\n");
  2439. goto unregister_ahashes;
  2440. }
  2441. err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
  2442. if (err) {
  2443. dev_err(dev, "Failed to register aeads\n");
  2444. goto unregister_algs;
  2445. }
  2446. return 0;
  2447. unregister_algs:
  2448. crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
  2449. unregister_ahashes:
  2450. crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
  2451. disable_hw:
  2452. artpec6_crypto_disable_hw(ac);
  2453. free_cache:
  2454. kmem_cache_destroy(ac->dma_cache);
  2455. return err;
  2456. }
  2457. static int artpec6_crypto_remove(struct platform_device *pdev)
  2458. {
  2459. struct artpec6_crypto *ac = platform_get_drvdata(pdev);
  2460. int irq = platform_get_irq(pdev, 0);
  2461. crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
  2462. crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
  2463. crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
  2464. tasklet_disable(&ac->task);
  2465. devm_free_irq(&pdev->dev, irq, ac);
  2466. tasklet_kill(&ac->task);
  2467. del_timer_sync(&ac->timer);
  2468. artpec6_crypto_disable_hw(ac);
  2469. kmem_cache_destroy(ac->dma_cache);
  2470. #ifdef CONFIG_DEBUG_FS
  2471. artpec6_crypto_free_debugfs();
  2472. #endif
  2473. return 0;
  2474. }
  2475. static struct platform_driver artpec6_crypto_driver = {
  2476. .probe = artpec6_crypto_probe,
  2477. .remove = artpec6_crypto_remove,
  2478. .driver = {
  2479. .name = "artpec6-crypto",
  2480. .of_match_table = artpec6_crypto_of_match,
  2481. },
  2482. };
  2483. module_platform_driver(artpec6_crypto_driver);
  2484. MODULE_AUTHOR("Axis Communications AB");
  2485. MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
  2486. MODULE_LICENSE("GPL");